Commit 6e9e60f

[Common,PWGHF,PWGJE,Trigger,Tutorial] Drop support for old ONNXRuntime API (#11654)
1 parent: 55b21d9

9 files changed (+1, -134 lines)

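Every file below follows the same pattern: the #if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>) branches written against the removed Ort::Experimental API are deleted, leaving only the stable onnxruntime_cxx_api.h code path. For orientation, here is a minimal standalone sketch of the stable-API session setup that replaces Ort::Experimental::Session::GetInputNames()/GetInputShapes(); it is not code from this commit, and "model.onnx" is a placeholder path. GetInputNameAllocated() returns an Ort::AllocatedStringPtr that frees its buffer on destruction, so each name is copied into a std::string immediately.

#include <onnxruntime_cxx_api.h>

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

int main()
{
  Ort::Env env{ORT_LOGGING_LEVEL_WARNING, "example"};
  Ort::SessionOptions sessionOptions;
  Ort::Session session{env, "model.onnx", sessionOptions}; // placeholder model path

  std::vector<std::string> inputNames;
  std::vector<std::vector<int64_t>> inputShapes;
  Ort::AllocatorWithDefaultOptions allocator;
  for (size_t i = 0; i < session.GetInputCount(); ++i) {
    // Copy the name out before the temporary AllocatedStringPtr releases its buffer.
    inputNames.push_back(session.GetInputNameAllocated(i, allocator).get());
    inputShapes.emplace_back(session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
  }
  return 0;
}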

Common/TableProducer/mftmchMatchingML.cxx

Lines changed: 0 additions & 21 deletions
@@ -10,11 +10,7 @@
 // or submit itself to any jurisdiction.
 
 #include <math.h>
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-#include <onnxruntime/core/session/experimental_onnxruntime_cxx_api.h>
-#else
 #include <onnxruntime_cxx_api.h>
-#endif
 #include <string>
 #include <regex>
 #include <TLorentzVector.h>
@@ -77,11 +73,7 @@ struct mftmchMatchingML {
 
   Ort::Env env{ORT_LOGGING_LEVEL_WARNING, "model-explorer"};
   Ort::SessionOptions session_options;
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-  std::shared_ptr<Ort::Experimental::Session> onnx_session = nullptr;
-#else
   std::shared_ptr<Ort::Session> onnx_session = nullptr;
-#endif
   OnnxModel model;
 
   template <typename F, typename M>
@@ -158,12 +150,6 @@ struct mftmchMatchingML {
     std::vector<std::string> output_names;
     std::vector<std::vector<int64_t>> output_shapes;
 
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-    input_names = onnx_session->GetInputNames();
-    input_shapes = onnx_session->GetInputShapes();
-    output_names = onnx_session->GetOutputNames();
-    output_shapes = onnx_session->GetOutputShapes();
-#else
     Ort::AllocatorWithDefaultOptions tmpAllocator;
     for (size_t i = 0; i < onnx_session->GetInputCount(); ++i) {
       input_names.push_back(onnx_session->GetInputNameAllocated(i, tmpAllocator).get());
@@ -177,7 +163,6 @@
     for (size_t i = 0; i < onnx_session->GetOutputCount(); ++i) {
       output_shapes.emplace_back(onnx_session->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
     }
-#endif
 
     auto input_shape = input_shapes[0];
     input_shape[0] = 1;
@@ -187,11 +172,6 @@
 
     if (input_tensor_values[8] < cfgXYWindow) {
       std::vector<Ort::Value> input_tensors;
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-      input_tensors.push_back(Ort::Experimental::Value::CreateTensor<float>(input_tensor_values.data(), input_tensor_values.size(), input_shape));
-
-      std::vector<Ort::Value> output_tensors = onnx_session->Run(input_names, input_tensors, output_names);
-#else
       Ort::MemoryInfo mem_info =
         Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
       input_tensors.push_back(Ort::Value::CreateTensor<float>(mem_info, input_tensor_values.data(), input_tensor_values.size(), input_shape.data(), input_shape.size()));
@@ -206,7 +186,6 @@
                      [&](const std::string& str) { return str.c_str(); });
 
       std::vector<Ort::Value> output_tensors = onnx_session->Run(runOptions, inputNamesChar.data(), input_tensors.data(), input_tensors.size(), outputNamesChar.data(), outputNamesChar.size());
-#endif
 
       const float* output_value = output_tensors[0].GetTensorData<float>();
 
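Where the experimental API accepted std::string names and plain shape vectors directly, the stable API wants an explicit Ort::MemoryInfo for externally owned buffers and raw const char* name arrays, hence the std::transform marshalling kept above. A hedged standalone sketch of that pattern (function and variable names are illustrative, not from this file):

#include <onnxruntime_cxx_api.h>

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// Illustrative helper: run a session on one flat float input tensor.
std::vector<Ort::Value> runOnce(Ort::Session& session,
                                const std::vector<std::string>& inputNames,
                                const std::vector<std::string>& outputNames,
                                std::vector<float>& values,
                                const std::vector<int64_t>& shape)
{
  // The stable CreateTensor overload wraps the caller's buffer and needs a MemoryInfo.
  Ort::MemoryInfo memInfo =
    Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
  std::vector<Ort::Value> inputTensors;
  inputTensors.push_back(Ort::Value::CreateTensor<float>(
    memInfo, values.data(), values.size(), shape.data(), shape.size()));

  // Run() takes const char* arrays, so marshal the std::string names first.
  std::vector<const char*> inputNamesChar(inputNames.size());
  std::transform(inputNames.begin(), inputNames.end(), inputNamesChar.begin(),
                 [](const std::string& s) { return s.c_str(); });
  std::vector<const char*> outputNamesChar(outputNames.size());
  std::transform(outputNames.begin(), outputNames.end(), outputNamesChar.begin(),
                 [](const std::string& s) { return s.c_str(); });

  Ort::RunOptions runOptions;
  return session.Run(runOptions, inputNamesChar.data(), inputTensors.data(), inputTensors.size(),
                     outputNamesChar.data(), outputNamesChar.size());
}

The returned Ort::Value objects own their result buffers, and the raw scores can then be read with GetTensorData<float>(), as in the last context line above.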
EventFiltering/PWGHF/HFFilterPrepareMLSamples.cxx

Lines changed: 0 additions & 4 deletions
@@ -28,11 +28,7 @@
 #include "Common/DataModel/PIDResponseTPC.h"
 #include "Common/DataModel/TrackSelectionTables.h"
 
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-#include <onnxruntime/core/session/experimental_onnxruntime_cxx_api.h> // needed for HFFilterHelpers, to be fixed
-#else
 #include <onnxruntime_cxx_api.h>
-#endif
 
 #include <CCDB/BasicCCDBManager.h>
 #include <CCDB/CcdbApi.h>

PWGHF/TableProducer/candidateSelectorLcPidMl.cxx

Lines changed: 0 additions & 4 deletions
@@ -127,15 +127,11 @@ struct HfCandidateSelectorLcPidMl {
     }
     if (retrieveSuccess) {
       auto session = model.getSession();
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-      auto inputShapes = session->GetInputShapes();
-#else
       std::vector<std::vector<int64_t>> inputShapes;
       Ort::AllocatorWithDefaultOptions tmpAllocator;
       for (size_t i = 0; i < session->GetInputCount(); ++i) {
         inputShapes.emplace_back(session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
       }
-#endif
       if (inputShapes[0][0] < 0) {
         LOGF(warning, "Model for Lc with negative input shape likely because converted with hummingbird, setting it to 1.");
         inputShapes[0][0] = 1;
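The negative first dimension handled here comes from models exported with a dynamic batch axis (for instance after hummingbird conversion), which ONNX reports as -1. A small hedged sketch of the same clamp, with illustrative names:

#include <cstdint>
#include <vector>

// Dynamic axes are reported as -1; pin them to a batch size of one before
// using the shape to size tensors. Shapes here are illustrative.
void pinDynamicBatch(std::vector<std::vector<int64_t>>& shapes)
{
  for (auto& shape : shapes) {
    if (!shape.empty() && shape[0] < 0) {
      shape[0] = 1;
    }
  }
}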

PWGJE/Core/MlResponseHfTagging.h

Lines changed: 0 additions & 12 deletions
@@ -18,11 +18,7 @@
 
 #include "Tools/ML/MlResponse.h"
 
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-#include <onnxruntime/core/session/experimental_onnxruntime_cxx_api.h>
-#else
 #include <onnxruntime_cxx_api.h>
-#endif
 
 #include <Framework/Logger.h>
 
@@ -333,25 +329,17 @@ class MlResponseHfTagging : public MlResponse<TypeOutputScore>
 class TensorAllocator
 {
  protected:
-#if !__has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
   Ort::MemoryInfo memInfo;
-#endif
  public:
   TensorAllocator()
-#if !__has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
     : memInfo(Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault))
-#endif
   {
   }
   ~TensorAllocator() = default;
   template <typename T>
   Ort::Value createTensor(std::vector<T>& input, std::vector<int64_t>& inputShape)
   {
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-    return Ort::Experimental::Value::CreateTensor<T>(input.data(), input.size(), inputShape);
-#else
     return Ort::Value::CreateTensor<T>(memInfo, input.data(), input.size(), inputShape.data(), inputShape.size());
-#endif
   }
 };
 
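Because the stable CreateTensor overload needs an Ort::MemoryInfo on every call, TensorAllocator now holds one unconditionally instead of only in the fallback branch. A hypothetical usage of the helper defined above (assuming the class is in scope; CreateTensor wraps the caller's buffer without copying, so the vectors must outlive the tensor):

#include <onnxruntime_cxx_api.h>

#include <cstdint>
#include <vector>

void runExample(TensorAllocator& allocator) // TensorAllocator as defined above
{
  std::vector<float> features{0.1f, 0.2f, 0.3f}; // illustrative input values
  std::vector<int64_t> shape{1, 3};              // batch of one, three features
  Ort::Value tensor = allocator.createTensor(features, shape);
  // The tensor aliases the features vector; keep it alive while the tensor is used.
}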
Tools/ML/MlResponse.h

Lines changed: 0 additions & 4 deletions
@@ -17,11 +17,7 @@
 #ifndef TOOLS_ML_MLRESPONSE_H_
 #define TOOLS_ML_MLRESPONSE_H_
 
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-#include <onnxruntime/core/session/experimental_onnxruntime_cxx_api.h>
-#else
 #include <onnxruntime_cxx_api.h>
-#endif
 
 #include <map>
 #include <string>

Tools/ML/model.cxx

Lines changed: 1 addition & 12 deletions
@@ -75,18 +75,8 @@ void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int t
   }
 
   mEnv = std::make_shared<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "onnx-model");
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-  mSession = std::make_shared<Ort::Experimental::Session>(*mEnv, modelPath, sessionOptions);
-#else
   mSession = std::make_shared<Ort::Session>(*mEnv, modelPath.c_str(), sessionOptions);
-#endif
-
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-  mInputNames = mSession->GetInputNames();
-  mInputShapes = mSession->GetInputShapes();
-  mOutputNames = mSession->GetOutputNames();
-  mOutputShapes = mSession->GetOutputShapes();
-#else
+
   Ort::AllocatorWithDefaultOptions tmpAllocator;
   for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
     mInputNames.push_back(mSession->GetInputNameAllocated(i, tmpAllocator).get());
@@ -100,7 +90,6 @@ void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int t
   for (size_t i = 0; i < mSession->GetOutputCount(); ++i) {
     mOutputShapes.emplace_back(mSession->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
   }
-#endif
   LOG(info) << "Input Nodes:";
   for (size_t i = 0; i < mInputNames.size(); i++) {
     LOG(info) << "\t" << mInputNames[i] << " : " << printShape(mInputShapes[i]);

Tools/ML/model.h

Lines changed: 0 additions & 30 deletions
@@ -21,11 +21,7 @@
 #define TOOLS_ML_MODEL_H_
 
 // C++ and system includes
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-#include <onnxruntime/core/session/experimental_onnxruntime_cxx_api.h>
-#else
 #include <onnxruntime_cxx_api.h>
-#endif
 #include <vector>
 #include <string>
 #include <memory>
@@ -62,9 +58,6 @@ class OnnxModel
     // assert(input[0].GetTensorTypeAndShapeInfo().GetShape() == getNumInputNodes()); --> Fails build in debug mode, TODO: assertion should be checked somehow
 
     try {
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-      auto outputTensors = mSession->Run(mInputNames, input, mOutputNames);
-#else
       Ort::RunOptions runOptions;
       std::vector<const char*> inputNamesChar(mInputNames.size(), nullptr);
       std::transform(std::begin(mInputNames), std::end(mInputNames), std::begin(inputNamesChar),
@@ -74,7 +67,6 @@
       std::transform(std::begin(mOutputNames), std::end(mOutputNames), std::begin(outputNamesChar),
                      [&](const std::string& str) { return str.c_str(); });
       auto outputTensors = mSession->Run(runOptions, inputNamesChar.data(), input.data(), input.size(), outputNamesChar.data(), outputNamesChar.size());
-#endif
       LOG(debug) << "Number of output tensors: " << outputTensors.size();
       if (outputTensors.size() != mOutputNames.size()) {
         LOG(fatal) << "Number of output tensors: " << outputTensors.size() << " does not agree with the model specified size: " << mOutputNames.size();
@@ -100,13 +92,9 @@
     assert(size % mInputShapes[0][1] == 0);
     std::vector<int64_t> inputShape{size / mInputShapes[0][1], mInputShapes[0][1]};
     std::vector<Ort::Value> inputTensors;
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-    inputTensors.emplace_back(Ort::Experimental::Value::CreateTensor<T>(input.data(), size, inputShape));
-#else
     Ort::MemoryInfo memInfo =
       Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
     inputTensors.emplace_back(Ort::Value::CreateTensor<T>(memInfo, input.data(), size, inputShape.data(), inputShape.size()));
-#endif
     LOG(debug) << "Input shape calculated from vector: " << printShape(inputShape);
     return evalModel<T>(inputTensors);
   }
@@ -117,9 +105,7 @@
   {
     std::vector<Ort::Value> inputTensors;
 
-#if !__has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
     Ort::MemoryInfo memInfo = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
-#endif
 
     for (size_t iinput = 0; iinput < input.size(); iinput++) {
       [[maybe_unused]] int totalSize = 1;
@@ -134,36 +120,24 @@
         inputShape.push_back(mInputShapes[iinput][idim]);
       }
 
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-      inputTensors.emplace_back(Ort::Experimental::Value::CreateTensor<T>(input[iinput].data(), size, inputShape));
-#else
       inputTensors.emplace_back(Ort::Value::CreateTensor<T>(memInfo, input[iinput].data(), size, inputShape.data(), inputShape.size()));
-#endif
     }
 
     return evalModel<T>(inputTensors);
   }
 
   // Reset session
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-  void resetSession() { mSession.reset(new Ort::Experimental::Session{*mEnv, modelPath, sessionOptions}); }
-#else
   void resetSession()
   {
     mSession.reset(new Ort::Session{*mEnv, modelPath.c_str(), sessionOptions});
   }
-#endif
 
   // Getters & Setters
   Ort::SessionOptions* getSessionOptions() { return &sessionOptions; } // For optimizations in post
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-  std::shared_ptr<Ort::Experimental::Session> getSession() { return mSession; }
-#else
   std::shared_ptr<Ort::Session> getSession()
   {
     return mSession;
   }
-#endif
   int getNumInputNodes() const { return mInputShapes[0][1]; }
   std::vector<std::vector<int64_t>> getInputShapes() const { return mInputShapes; }
   int getNumOutputNodes() const { return mOutputShapes[0][1]; }
@@ -174,11 +148,7 @@
  private:
   // Environment variables for the ONNX runtime
   std::shared_ptr<Ort::Env> mEnv = nullptr;
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-  std::shared_ptr<Ort::Experimental::Session> mSession = nullptr;
-#else
   std::shared_ptr<Ort::Session> mSession = nullptr;
-#endif
   Ort::SessionOptions sessionOptions;
 
   // Input & Output specifications of the loaded network
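One subtlety in the stable Ort::Session constructor used in initModel() and resetSession() above: it takes an ORTCHAR_T* path, which is char* on Linux and macOS but wchar_t* on Windows, so modelPath.c_str() is sufficient for O2's Linux targets. A minimal hedged sketch (names are illustrative):

#include <onnxruntime_cxx_api.h>

#include <memory>
#include <string>

// Recreate a session with the stable API; on Windows this would need a
// wide-character path instead of modelPath.c_str().
std::shared_ptr<Ort::Session> makeSession(Ort::Env& env, const std::string& modelPath,
                                          Ort::SessionOptions& options)
{
  return std::make_shared<Ort::Session>(env, modelPath.c_str(), options);
}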

Tools/PIDML/pidOnnxModel.h

Lines changed: 0 additions & 27 deletions
@@ -31,11 +31,7 @@
 #include <utility>
 #include <memory>
 #include <vector>
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-#include <onnxruntime/core/session/experimental_onnxruntime_cxx_api.h>
-#else
 #include <onnxruntime_cxx_api.h>
-#endif
 
 #include "rapidjson/document.h"
 #include "rapidjson/filereadstream.h"
@@ -93,19 +89,9 @@ struct PidONNXModel {
     Ort::SessionOptions sessionOptions;
     mEnv = std::make_shared<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "pid-onnx-inferer");
     LOG(info) << "Loading ONNX model from file: " << modelFile;
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-    mSession.reset(new Ort::Experimental::Session{*mEnv, modelFile, sessionOptions});
-#else
     mSession.reset(new Ort::Session{*mEnv, modelFile.c_str(), sessionOptions});
-#endif
     LOG(info) << "ONNX model loaded";
 
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-    mInputNames = mSession->GetInputNames();
-    mInputShapes = mSession->GetInputShapes();
-    mOutputNames = mSession->GetOutputNames();
-    mOutputShapes = mSession->GetOutputShapes();
-#else
     Ort::AllocatorWithDefaultOptions tmpAllocator;
     for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
       mInputNames.push_back(mSession->GetInputNameAllocated(i, tmpAllocator).get());
@@ -119,7 +105,6 @@
     for (size_t i = 0; i < mSession->GetOutputCount(); ++i) {
       mOutputShapes.emplace_back(mSession->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
     }
-#endif
 
     LOG(debug) << "Input Node Name/Shape (" << mInputNames.size() << "):";
     for (size_t i = 0; i < mInputNames.size(); i++) {
@@ -274,22 +259,15 @@
     std::vector<float> inputTensorValues = getValues(track);
     std::vector<Ort::Value> inputTensors;
 
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-    inputTensors.emplace_back(Ort::Experimental::Value::CreateTensor<float>(inputTensorValues.data(), inputTensorValues.size(), inputShape));
-#else
     Ort::MemoryInfo memInfo = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
     inputTensors.emplace_back(Ort::Value::CreateTensor<float>(memInfo, inputTensorValues.data(), inputTensorValues.size(), inputShape.data(), inputShape.size()));
-#endif
 
     // Double-check the dimensions of the input tensor
     assert(inputTensors[0].IsTensor() &&
            inputTensors[0].GetTensorTypeAndShapeInfo().GetShape() == inputShape);
     LOG(debug) << "input tensor shape: " << printShape(inputTensors[0].GetTensorTypeAndShapeInfo().GetShape());
 
     try {
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-      auto outputTensors = mSession->Run(mInputNames, inputTensors, mOutputNames);
-#else
       Ort::RunOptions runOptions;
       std::vector<const char*> inputNamesChar(mInputNames.size(), nullptr);
       std::transform(std::begin(mInputNames), std::end(mInputNames), std::begin(inputNamesChar),
@@ -299,7 +277,6 @@
       std::transform(std::begin(mOutputNames), std::end(mOutputNames), std::begin(outputNamesChar),
                      [&](const std::string& str) { return str.c_str(); });
       auto outputTensors = mSession->Run(runOptions, inputNamesChar.data(), inputTensors.data(), inputTensors.size(), outputNamesChar.data(), outputNamesChar.size());
-#endif
 
       // Double-check the dimensions of the output tensors
       // The number of output tensors is equal to the number of output nodes specified in the Run() call
@@ -331,11 +308,7 @@
 
   std::shared_ptr<Ort::Env> mEnv = nullptr;
   // No empty constructors for Session, we need a pointer
-#if __has_include(<onnxruntime/core/session/onnxruntime_cxx_api.h>)
-  std::shared_ptr<Ort::Experimental::Session> mSession = nullptr;
-#else
   std::shared_ptr<Ort::Session> mSession = nullptr;
-#endif
 
   std::vector<double> mPLimits;
   std::vector<std::string> mInputNames;
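Reading the results is unchanged by this migration: Run() returns Ort::Value tensors whose buffers remain valid for the lifetime of the value. A short hedged sketch, assuming a successful Run() call and a model whose first output is a flat float tensor:

#include <onnxruntime_cxx_api.h>

#include <vector>

// Return the first float of the first output tensor; purely illustrative.
float firstScore(const std::vector<Ort::Value>& outputTensors)
{
  const float* data = outputTensors[0].GetTensorData<float>();
  return data[0];
}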
