@@ -133,7 +133,7 @@ void OrtModel::reset(std::unordered_map<std::string, std::string> optionsMap)
133133 },
134134 (void *)3 );
135135 (pImplOrt->env )->DisableTelemetryEvents (); // Disable telemetry events
136- ( pImplOrt->session ). reset ( new Ort::Session{*(pImplOrt->env ), modelPath.c_str (), pImplOrt->sessionOptions });
136+ pImplOrt->session = std::make_shared<Ort::Session>(*(pImplOrt->env), modelPath.c_str(), pImplOrt->sessionOptions);
137137
138138 for (size_t i = 0 ; i < (pImplOrt->session )->GetInputCount (); ++i) {
139139 mInputNames .push_back ((pImplOrt->session )->GetInputNameAllocated (i, pImplOrt->allocator ).get ());
@@ -169,7 +169,7 @@ void OrtModel::reset(std::unordered_map<std::string, std::string> optionsMap)
169169
170170void OrtModel::resetSession ()
171171{
172- ( pImplOrt->session ). reset ( new Ort::Session{*(pImplOrt->env ), modelPath.c_str (), pImplOrt->sessionOptions });
172+ pImplOrt->session = std::make_shared<Ort::Session>(*(pImplOrt->env), modelPath.c_str(), pImplOrt->sessionOptions);
173173}
174174
175175template <class I , class O >
@@ -180,116 +180,125 @@ std::vector<O> OrtModel::v2v(std::vector<I>& input, bool clearInput)
180180 } else {
181181 std::vector<O> output (input.size ());
182182 std::transform (std::begin (input), std::end (input), std::begin (output), [](I f) { return O (f); });
183- if (clearInput)
183+ if (clearInput){
184184 input.clear ();
185+ }
185186 return output;
186187 }
187188}
188189
189190template <class I , class O > // class I is the input data type, e.g. float, class O is the output data type, e.g. O2::gpu::OrtDataType::Float16_t from O2/GPU/GPUTracking/ML/convert_float16.h
190191std::vector<O> OrtModel::inference (std::vector<I>& input)
191192{
192- std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), mInputShapes [0 ][1 ]};
193+ std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), ( int64_t ) mInputShapes [0 ][1 ]};
193194 std::vector<Ort::Value> inputTensor;
194- inputTensor.emplace_back (Ort::Value::CreateTensor<O>(pImplOrt->memoryInfo , ( reinterpret_cast <O*>(input)) .data (), input.size (), inputShape.data (), inputShape.size ()));
195+ inputTensor.emplace_back (Ort::Value::CreateTensor<O>(pImplOrt->memoryInfo , reinterpret_cast <O*>(input.data () ), input.size (), inputShape.data (), inputShape.size ()));
195196 // input.clear();
196197 auto outputTensors = (pImplOrt->session )->Run (pImplOrt->runOptions , inputNamesChar.data (), inputTensor.data (), inputTensor.size (), outputNamesChar.data (), outputNamesChar.size ());
197198 O* outputValues = reinterpret_cast <O*>(outputTensors[0 ].template GetTensorMutableData <O>());
199+ std::vector<O> outputValuesVec{outputValues, outputValues + inputShape[0 ] * mOutputShapes [0 ][1 ]};
198200 outputTensors.clear ();
199- return std::vector<O>{outputValues, outputValues + inputShape[ 0 ] * mOutputShapes [ 0 ][ 1 ]} ;
201+ return outputValuesVec ;
200202}
201203
202204template <class I , class O > // class I is the input data type, e.g. float, class O is the output data type, e.g. O2::gpu::OrtDataType::Float16_t from O2/GPU/GPUTracking/ML/convert_float16.h
203205std::vector<O> OrtModel::inference (std::vector<std::vector<I>>& input)
204206{
205207 std::vector<Ort::Value> inputTensor;
206208 for (auto i : input) {
207- std::vector<int64_t > inputShape{(int64_t )(i.size () / mInputShapes [0 ][1 ]), mInputShapes [0 ][1 ]};
208- inputTensor.emplace_back (Ort::Value::CreateTensor<O>(pImplOrt->memoryInfo , ( reinterpret_cast <O*>(i)) .data (), i.size (), inputShape.data (), inputShape.size ()));
209+ std::vector<int64_t > inputShape{(int64_t )(i.size () / mInputShapes [0 ][1 ]), ( int64_t ) mInputShapes [0 ][1 ]};
210+ inputTensor.emplace_back (Ort::Value::CreateTensor<O>(pImplOrt->memoryInfo , reinterpret_cast <O*>(i.data () ), i.size (), inputShape.data (), inputShape.size ()));
209211 }
210212 // input.clear();
211213 auto outputTensors = (pImplOrt->session )->Run (pImplOrt->runOptions , inputNamesChar.data (), inputTensor.data (), inputTensor.size (), outputNamesChar.data (), outputNamesChar.size ());
212214 O* outputValues = reinterpret_cast <O*>(outputTensors[0 ].template GetTensorMutableData <O>());
215+ std::vector<O> outputValuesVec{outputValues, outputValues + inputTensor.size () / mInputShapes [0 ][1 ] * mOutputShapes [0 ][1 ]};
213216 outputTensors.clear ();
214- return std::vector<O>{outputValues, outputValues + inputTensor. size () / mInputShapes [ 0 ][ 1 ] * mOutputShapes [ 0 ][ 1 ]} ;
217+ return outputValuesVec ;
215218}
216219
217220std::string OrtModel::printShape (const std::vector<int64_t >& v)
218221{
219222 std::stringstream ss (" " );
220- for (size_t i = 0 ; i < v.size () - 1 ; i++)
223+ for (size_t i = 0 ; i < v.size () - 1 ; i++) {
221224 ss << v[i] << " x" ;
225+ }
222226 ss << v[v.size () - 1 ];
223227 return ss.str ();
224228}
225229
226230template <>
227231std::vector<float > OrtModel::inference<float , float >(std::vector<float >& input)
228232{
229- std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), mInputShapes [0 ][1 ]};
233+ std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), ( int64_t ) mInputShapes [0 ][1 ]};
230234 std::vector<Ort::Value> inputTensor;
231235 inputTensor.emplace_back (Ort::Value::CreateTensor<float >(pImplOrt->memoryInfo , input.data (), input.size (), inputShape.data (), inputShape.size ()));
232236 // input.clear();
233237 auto outputTensors = (pImplOrt->session )->Run (pImplOrt->runOptions , inputNamesChar.data (), inputTensor.data (), inputTensor.size (), outputNamesChar.data (), outputNamesChar.size ());
234238 float * outputValues = outputTensors[0 ].template GetTensorMutableData <float >();
239+ std::vector<float > outputValuesVec{outputValues, outputValues + inputShape[0 ] * mOutputShapes [0 ][1 ]};
235240 outputTensors.clear ();
236- return std::vector< float >{outputValues, outputValues + inputShape[ 0 ] * mOutputShapes [ 0 ][ 1 ]} ;
241+ return outputValuesVec ;
237242}
238243
239244template <>
240245std::vector<float > OrtModel::inference<OrtDataType::Float16_t, float >(std::vector<OrtDataType::Float16_t>& input)
241246{
242- std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), mInputShapes [0 ][1 ]};
247+ std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), ( int64_t ) mInputShapes [0 ][1 ]};
243248 std::vector<Ort::Value> inputTensor;
244249 inputTensor.emplace_back (Ort::Value::CreateTensor<Ort::Float16_t>(pImplOrt->memoryInfo , reinterpret_cast <Ort::Float16_t*>(input.data ()), input.size (), inputShape.data (), inputShape.size ()));
245250 // input.clear();
246251 auto outputTensors = (pImplOrt->session )->Run (pImplOrt->runOptions , inputNamesChar.data (), inputTensor.data (), inputTensor.size (), outputNamesChar.data (), outputNamesChar.size ());
247252 float * outputValues = outputTensors[0 ].template GetTensorMutableData <float >();
253+ std::vector<float > outputValuesVec{outputValues, outputValues + inputShape[0 ] * mOutputShapes [0 ][1 ]};
248254 outputTensors.clear ();
249- return std::vector< float >{outputValues, outputValues + inputShape[ 0 ] * mOutputShapes [ 0 ][ 1 ]} ;
255+ return outputValuesVec ;
250256}
251257
252258template <>
253259std::vector<OrtDataType::Float16_t> OrtModel::inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(std::vector<OrtDataType::Float16_t>& input)
254260{
255- std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), mInputShapes [0 ][1 ]};
261+ std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), ( int64_t ) mInputShapes [0 ][1 ]};
256262 std::vector<Ort::Value> inputTensor;
257263 inputTensor.emplace_back (Ort::Value::CreateTensor<Ort::Float16_t>(pImplOrt->memoryInfo , reinterpret_cast <Ort::Float16_t*>(input.data ()), input.size (), inputShape.data (), inputShape.size ()));
258264 // input.clear();
259265 auto outputTensors = (pImplOrt->session )->Run (pImplOrt->runOptions , inputNamesChar.data (), inputTensor.data (), inputTensor.size (), outputNamesChar.data (), outputNamesChar.size ());
260266 OrtDataType::Float16_t* outputValues = reinterpret_cast <OrtDataType::Float16_t*>(outputTensors[0 ].template GetTensorMutableData <Ort::Float16_t>());
267+ std::vector<OrtDataType::Float16_t> outputValuesVec{outputValues, outputValues + inputShape[0 ] * mOutputShapes [0 ][1 ]};
261268 outputTensors.clear ();
262- return std::vector<OrtDataType::Float16_t>{outputValues, outputValues + inputShape[ 0 ] * mOutputShapes [ 0 ][ 1 ]} ;
269+ return outputValuesVec ;
263270}
264271
265272template <>
266273std::vector<OrtDataType::Float16_t> OrtModel::inference<float , OrtDataType::Float16_t>(std::vector<float >& input)
267274{
268- std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), mInputShapes [0 ][1 ]};
275+ std::vector<int64_t > inputShape{(int64_t )(input.size () / mInputShapes [0 ][1 ]), ( int64_t ) mInputShapes [0 ][1 ]};
269276 std::vector<Ort::Value> inputTensor;
270277 inputTensor.emplace_back (Ort::Value::CreateTensor<Ort::Float16_t>(pImplOrt->memoryInfo , reinterpret_cast <Ort::Float16_t*>(input.data ()), input.size (), inputShape.data (), inputShape.size ()));
271278 // input.clear();
272279 auto outputTensors = (pImplOrt->session )->Run (pImplOrt->runOptions , inputNamesChar.data (), inputTensor.data (), inputTensor.size (), outputNamesChar.data (), outputNamesChar.size ());
273280 OrtDataType::Float16_t* outputValues = reinterpret_cast <OrtDataType::Float16_t*>(outputTensors[0 ].template GetTensorMutableData <Ort::Float16_t>());
281+ std::vector<OrtDataType::Float16_t> outputValuesVec{outputValues, outputValues + inputShape[0 ] * mOutputShapes [0 ][1 ]};
274282 outputTensors.clear ();
275- return std::vector<OrtDataType::Float16_t>{outputValues, outputValues + inputShape[ 0 ] * mOutputShapes [ 0 ][ 1 ]} ;
283+ return outputValuesVec ;
276284}
277285
278286template <>
279287std::vector<OrtDataType::Float16_t> OrtModel::inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(std::vector<std::vector<OrtDataType::Float16_t>>& input)
280288{
281289 std::vector<Ort::Value> inputTensor;
282290 for (auto i : input) {
283- std::vector<int64_t > inputShape{(int64_t )(i.size () / mInputShapes [0 ][1 ]), mInputShapes [0 ][1 ]};
291+ std::vector<int64_t > inputShape{(int64_t )(i.size () / mInputShapes [0 ][1 ]), ( int64_t ) mInputShapes [0 ][1 ]};
284292 inputTensor.emplace_back (Ort::Value::CreateTensor<Ort::Float16_t>(pImplOrt->memoryInfo , reinterpret_cast <Ort::Float16_t*>(i.data ()), i.size (), inputShape.data (), inputShape.size ()));
285293 }
286294 // input.clear();
287295 auto outputTensors = (pImplOrt->session )->Run (pImplOrt->runOptions , inputNamesChar.data (), inputTensor.data (), inputTensor.size (), outputNamesChar.data (), outputNamesChar.size ());
288296 OrtDataType::Float16_t* outputValues = reinterpret_cast <OrtDataType::Float16_t*>(outputTensors[0 ].template GetTensorMutableData <Ort::Float16_t>());
297+ std::vector<OrtDataType::Float16_t> outputValuesVec{outputValues, outputValues + inputTensor.size () / mInputShapes [0 ][1 ] * mOutputShapes [0 ][1 ]};
289298 outputTensors.clear ();
290- return std::vector<OrtDataType::Float16_t>{outputValues, outputValues + inputTensor. size () / mInputShapes [ 0 ][ 1 ] * mOutputShapes [ 0 ][ 1 ]} ;
299+ return outputValuesVec ;
291300}
292301
293302} // namespace ml
294303
295- } // namespace o2
304+ } // namespace o2
0 commit comments