
Commit 7de6492

Fixing member variable naming
1 parent 04baff0 commit 7de6492

File tree

8 files changed, +392 −394 lines changed


Common/ML/include/ML/OrtInterface.h

Lines changed: 14 additions & 14 deletions
@@ -70,23 +70,23 @@ class OrtModel
   Ort::SessionOptions* getSessionOptions();
   Ort::MemoryInfo* getMemoryInfo();
   Ort::Env* getEnv();
-  int32_t getIntraOpNumThreads() const { return intraOpNumThreads; }
-  int32_t getInterOpNumThreads() const { return interOpNumThreads; }
+  int32_t getIntraOpNumThreads() const { return mIntraOpNumThreads; }
+  int32_t getInterOpNumThreads() const { return mInterOpNumThreads; }
 
   // Setters
-  void setDeviceId(int32_t id) { deviceId = id; }
+  void setDeviceId(int32_t id) { mDeviceId = id; }
   void setIO();
-  void setActiveThreads(int threads) { intraOpNumThreads = threads; }
+  void setActiveThreads(int threads) { mIntraOpNumThreads = threads; }
   void setIntraOpNumThreads(int threads)
   {
-    if (deviceType == "CPU") {
-      intraOpNumThreads = threads;
+    if (mDeviceType == "CPU") {
+      mIntraOpNumThreads = threads;
     }
   }
   void setInterOpNumThreads(int threads)
   {
-    if (deviceType == "CPU") {
-      interOpNumThreads = threads;
+    if (mDeviceType == "CPU") {
+      mInterOpNumThreads = threads;
     }
   }
   void setEnv(Ort::Env*);
@@ -116,16 +116,16 @@ class OrtModel
   OrtVariables* pImplOrt;
 
   // Input & Output specifications of the loaded network
-  std::vector<const char*> inputNamesChar, outputNamesChar;
+  std::vector<const char*> mInputNamesChar, mOutputNamesChar;
   std::vector<std::string> mInputNames, mOutputNames;
-  std::vector<std::vector<int64_t>> mInputShapes, mOutputShapes, inputShapesCopy, outputShapesCopy; // Input shapes
-  std::vector<int64_t> inputSizePerNode, outputSizePerNode; // Output shapes
-  int32_t mInputsTotal = 0, mOutputsTotal = 0; // Total number of inputs and outputs
+  std::vector<std::vector<int64_t>> mInputShapes, mOutputShapes, mInputShapesCopy, mOutputShapesCopy; // Input shapes
+  std::vector<int64_t> mInputSizePerNode, mOutputSizePerNode; // Output shapes
+  int32_t mInputsTotal = 0, mOutputsTotal = 0; // Total number of inputs and outputs
 
   // Environment settings
   bool mInitialized = false;
-  std::string modelPath, envName = "", deviceType = "CPU", thread_affinity = ""; // device options should be cpu, rocm, migraphx, cuda
-  int32_t intraOpNumThreads = 1, interOpNumThreads = 1, deviceId = -1, enableProfiling = 0, loggingLevel = 0, allocateDeviceMemory = 0, enableOptimizations = 0;
+  std::string mModelPath, mEnvName = "", mDeviceType = "CPU", mThreadAffinity = ""; // device options should be cpu, rocm, migraphx, cuda
+  int32_t mIntraOpNumThreads = 1, mInterOpNumThreads = 1, mDeviceId = -1, mEnableProfiling = 0, mLoggingLevel = 0, mAllocateDeviceMemory = 0, mEnableOptimizations = 0;
 
   std::string printShape(const std::vector<int64_t>&);
   std::string printShape(const std::vector<std::vector<int64_t>>&, std::vector<std::string>&);
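
The renames above bring the remaining fields in line with the mCamelCase member-naming convention the class already uses for mInputNames, mInputShapes, and mInitialized. A minimal sketch of the pattern (hypothetical class, not from this repository):

class Example
{
 public:
  // Accessors and mutators read/write the m-prefixed member.
  int32_t getDeviceId() const { return mDeviceId; }
  void setDeviceId(int32_t id) { mDeviceId = id; }

 private:
  int32_t mDeviceId = -1; // the 'm' prefix marks member variables, separating them from locals and parameters
};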

Common/ML/src/OrtInterface.cxx

Lines changed: 74 additions & 74 deletions
@@ -49,27 +49,27 @@ void OrtModel::initOptions(std::unordered_map<std::string, std::string> optionsM
   }
 
   if (!optionsMap["model-path"].empty()) {
-    modelPath = optionsMap["model-path"];
-    deviceType = (optionsMap.contains("device-type") ? optionsMap["device-type"] : "CPU");
-    deviceId = (optionsMap.contains("device-id") ? std::stoi(optionsMap["device-id"]) : -1);
-    allocateDeviceMemory = (optionsMap.contains("allocate-device-memory") ? std::stoi(optionsMap["allocate-device-memory"]) : 0);
-    intraOpNumThreads = (optionsMap.contains("intra-op-num-threads") ? std::stoi(optionsMap["intra-op-num-threads"]) : 0);
-    interOpNumThreads = (optionsMap.contains("inter-op-num-threads") ? std::stoi(optionsMap["inter-op-num-threads"]) : 0);
-    loggingLevel = (optionsMap.contains("logging-level") ? std::stoi(optionsMap["logging-level"]) : 0);
-    enableProfiling = (optionsMap.contains("enable-profiling") ? std::stoi(optionsMap["enable-profiling"]) : 0);
-    enableOptimizations = (optionsMap.contains("enable-optimizations") ? std::stoi(optionsMap["enable-optimizations"]) : 0);
-    envName = (optionsMap.contains("onnx-environment-name") ? optionsMap["onnx-environment-name"] : "onnx_model_inference");
-
-    if (deviceType == "CPU") {
-      (pImplOrt->sessionOptions).SetIntraOpNumThreads(intraOpNumThreads);
-      (pImplOrt->sessionOptions).SetInterOpNumThreads(interOpNumThreads);
-      if (intraOpNumThreads > 1 || interOpNumThreads > 1) {
+    mModelPath = optionsMap["model-path"];
+    mDeviceType = (optionsMap.contains("device-type") ? optionsMap["device-type"] : "CPU");
+    mDeviceId = (optionsMap.contains("device-id") ? std::stoi(optionsMap["device-id"]) : -1);
+    mAllocateDeviceMemory = (optionsMap.contains("allocate-device-memory") ? std::stoi(optionsMap["allocate-device-memory"]) : 0);
+    mIntraOpNumThreads = (optionsMap.contains("intra-op-num-threads") ? std::stoi(optionsMap["intra-op-num-threads"]) : 0);
+    mInterOpNumThreads = (optionsMap.contains("inter-op-num-threads") ? std::stoi(optionsMap["inter-op-num-threads"]) : 0);
+    mLoggingLevel = (optionsMap.contains("logging-level") ? std::stoi(optionsMap["logging-level"]) : 0);
+    mEnableProfiling = (optionsMap.contains("enable-profiling") ? std::stoi(optionsMap["enable-profiling"]) : 0);
+    mEnableOptimizations = (optionsMap.contains("enable-optimizations") ? std::stoi(optionsMap["enable-optimizations"]) : 0);
+    mEnvName = (optionsMap.contains("onnx-environment-name") ? optionsMap["onnx-environment-name"] : "onnx_model_inference");
+
+    if (mDeviceType == "CPU") {
+      (pImplOrt->sessionOptions).SetIntraOpNumThreads(mIntraOpNumThreads);
+      (pImplOrt->sessionOptions).SetInterOpNumThreads(mInterOpNumThreads);
+      if (mIntraOpNumThreads > 1 || mInterOpNumThreads > 1) {
         (pImplOrt->sessionOptions).SetExecutionMode(ExecutionMode::ORT_PARALLEL);
-      } else if (intraOpNumThreads == 1) {
+      } else if (mIntraOpNumThreads == 1) {
         (pImplOrt->sessionOptions).SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
       }
-      if (loggingLevel < 2) {
-        LOG(info) << "(ORT) CPU execution provider set with " << intraOpNumThreads << " (intraOpNumThreads) and " << interOpNumThreads << " (interOpNumThreads) threads";
+      if (mLoggingLevel < 2) {
+        LOG(info) << "(ORT) CPU execution provider set with " << mIntraOpNumThreads << " (mIntraOpNumThreads) and " << mInterOpNumThreads << " (mInterOpNumThreads) threads";
       }
     }
 
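The option keys parsed above suggest a configuration map along the following lines; the keys are taken from the code, the values are illustrative placeholders only:

// Sketch of an options map for OrtModel::initOptions(); values are examples.
std::unordered_map<std::string, std::string> options{
  {"model-path", "/path/to/model.onnx"},  // required; an empty path aborts initialization
  {"device-type", "CPU"},                 // CPU, ROCM, MIGRAPHX or CUDA
  {"device-id", "0"},
  {"intra-op-num-threads", "4"},
  {"inter-op-num-threads", "2"},
  {"logging-level", "1"},                 // values < 2 enable the info logs shown above
  {"enable-profiling", "0"},
  {"enable-optimizations", "1"},
  {"onnx-environment-name", "onnx_model_inference"}};
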
@@ -79,7 +79,7 @@ void OrtModel::initOptions(std::unordered_map<std::string, std::string> optionsM
     (pImplOrt->sessionOptions).DisableMemPattern();
     (pImplOrt->sessionOptions).DisableCpuMemArena();
 
-    if (enableProfiling) {
+    if (mEnableProfiling) {
       if (optionsMap.contains("profiling-output-path")) {
         (pImplOrt->sessionOptions).EnableProfiling((optionsMap["profiling-output-path"] + "/ORT_LOG_").c_str());
       } else {
@@ -90,8 +90,8 @@ void OrtModel::initOptions(std::unordered_map<std::string, std::string> optionsM
       (pImplOrt->sessionOptions).DisableProfiling();
     }
 
-    (pImplOrt->sessionOptions).SetGraphOptimizationLevel(GraphOptimizationLevel(enableOptimizations));
-    (pImplOrt->sessionOptions).SetLogSeverityLevel(OrtLoggingLevel(loggingLevel));
+    (pImplOrt->sessionOptions).SetGraphOptimizationLevel(GraphOptimizationLevel(mEnableOptimizations));
+    (pImplOrt->sessionOptions).SetLogSeverityLevel(OrtLoggingLevel(mLoggingLevel));
 
     mInitialized = true;
   } else {
@@ -102,8 +102,8 @@ void OrtModel::initOptions(std::unordered_map<std::string, std::string> optionsM
 void OrtModel::initEnvironment()
 {
   pImplOrt->env = std::make_shared<Ort::Env>(
-    OrtLoggingLevel(loggingLevel),
-    (envName.empty() ? "ORT" : envName.c_str()),
+    OrtLoggingLevel(mLoggingLevel),
+    (mEnvName.empty() ? "ORT" : mEnvName.c_str()),
     // Integrate ORT logging into Fairlogger
     [](void* param, OrtLoggingLevel severity, const char* category, const char* logid, const char* code_location, const char* message) {
       if (severity == ORT_LOGGING_LEVEL_VERBOSE) {
@@ -126,15 +126,15 @@ void OrtModel::initEnvironment()
 
 void OrtModel::initSession()
 {
-  if (allocateDeviceMemory) {
-    memoryOnDevice(deviceId);
+  if (mAllocateDeviceMemory) {
+    memoryOnDevice(mDeviceId);
   }
-  pImplOrt->session = std::make_shared<Ort::Session>(*pImplOrt->env, modelPath.c_str(), pImplOrt->sessionOptions);
+  pImplOrt->session = std::make_shared<Ort::Session>(*pImplOrt->env, mModelPath.c_str(), pImplOrt->sessionOptions);
   pImplOrt->ioBinding = std::make_unique<Ort::IoBinding>(*pImplOrt->session);
 
   setIO();
 
-  if (loggingLevel < 2) {
+  if (mLoggingLevel < 2) {
     LOG(info) << "(ORT) Model loaded successfully! (inputs: " << printShape(mInputShapes, mInputNames) << ", outputs: " << printShape(mOutputShapes, mInputNames) << ")";
   }
 }
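
Taken together with initOptions() above, a plausible bring-up sequence for a model looks like this; the call order is inferred from the method bodies, not prescribed by the diff:

OrtModel model;
model.initOptions(options); // parse the option map and configure session options
model.initEnvironment();    // create the Ort::Env with Fairlogger-backed logging
model.initSession();        // optionally allocate device memory, load mModelPath, call setIO()
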
@@ -151,22 +151,22 @@ void OrtModel::memoryOnDevice(int32_t deviceIndex)
     // (pImplOrt->runOptions).AddConfigEntry("memory.enable_memory_arena_shrinkage", ("gpu:" + std::to_string(deviceIndex)).c_str()); // See kOrtRunOptionsConfigEnableMemoryArenaShrinkage, https://github.com/microsoft/onnxruntime/blob/90c263f471bbce724e77d8e62831d3a9fa838b2f/include/onnxruntime/core/session/onnxruntime_run_options_config_keys.h#L27
 
     std::string dev_mem_str = "";
-    if (deviceType == "ROCM") {
+    if (mDeviceType == "ROCM") {
       dev_mem_str = "Hip";
     }
-    if (deviceType == "CUDA") {
+    if (mDeviceType == "CUDA") {
       dev_mem_str = "Cuda";
     }
     pImplOrt->memoryInfo = Ort::MemoryInfo(dev_mem_str.c_str(), OrtAllocatorType::OrtDeviceAllocator, deviceIndex, OrtMemType::OrtMemTypeDefault);
-    if (loggingLevel < 2) {
-      LOG(info) << "(ORT) Memory info set to on-device memory for device type " << deviceType << " with ID " << deviceIndex << " and pImplOrt pointer " << pImplOrt;
+    if (mLoggingLevel < 2) {
+      LOG(info) << "(ORT) Memory info set to on-device memory for device type " << mDeviceType << " with ID " << deviceIndex << " and pImplOrt pointer " << pImplOrt;
     }
   }
 }
 
 void OrtModel::resetSession()
 {
-  pImplOrt->session = std::make_shared<Ort::Session>(*(pImplOrt->env), modelPath.c_str(), pImplOrt->sessionOptions);
+  pImplOrt->session = std::make_shared<Ort::Session>(*(pImplOrt->env), mModelPath.c_str(), pImplOrt->sessionOptions);
 }
 
 // Getters
@@ -215,24 +215,24 @@ void OrtModel::setIO()
     mOutputShapes.emplace_back((pImplOrt->session)->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
   }
 
-  inputNamesChar.resize(mInputNames.size(), nullptr);
-  std::transform(std::begin(mInputNames), std::end(mInputNames), std::begin(inputNamesChar),
+  mInputNamesChar.resize(mInputNames.size(), nullptr);
+  std::transform(std::begin(mInputNames), std::end(mInputNames), std::begin(mInputNamesChar),
                  [&](const std::string& str) { return str.c_str(); });
-  outputNamesChar.resize(mOutputNames.size(), nullptr);
-  std::transform(std::begin(mOutputNames), std::end(mOutputNames), std::begin(outputNamesChar),
+  mOutputNamesChar.resize(mOutputNames.size(), nullptr);
+  std::transform(std::begin(mOutputNames), std::end(mOutputNames), std::begin(mOutputNamesChar),
                  [&](const std::string& str) { return str.c_str(); });
 
-  inputShapesCopy = mInputShapes;
-  outputShapesCopy = mOutputShapes;
-  inputSizePerNode.resize(mInputShapes.size(), 1);
-  outputSizePerNode.resize(mOutputShapes.size(), 1);
+  mInputShapesCopy = mInputShapes;
+  mOutputShapesCopy = mOutputShapes;
+  mInputSizePerNode.resize(mInputShapes.size(), 1);
+  mOutputSizePerNode.resize(mOutputShapes.size(), 1);
   mInputsTotal = 1;
   for (size_t i = 0; i < mInputShapes.size(); ++i) {
     if (mInputShapes[i].size() > 0) {
       for (size_t j = 1; j < mInputShapes[i].size(); ++j) {
         if (mInputShapes[i][j] > 0) {
           mInputsTotal *= mInputShapes[i][j];
-          inputSizePerNode[i] *= mInputShapes[i][j];
+          mInputSizePerNode[i] *= mInputShapes[i][j];
         }
       }
     }
@@ -243,7 +243,7 @@ void OrtModel::setIO()
       for (size_t j = 1; j < mOutputShapes[i].size(); ++j) {
         if (mOutputShapes[i][j] > 0) {
           mOutputsTotal *= mOutputShapes[i][j];
-          outputSizePerNode[i] *= mOutputShapes[i][j];
+          mOutputSizePerNode[i] *= mOutputShapes[i][j];
         }
       }
     }
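
Both loops compute the same quantity per node: the product of all dimensions after the batch axis (index 0), skipping dynamic dimensions reported as non-positive values. A standalone restatement of that logic, for illustration only:

#include <cstdint>
#include <vector>

// Mirrors the per-node size computation in setIO(): multiply every
// dimension after the batch axis, ignoring dynamic (non-positive) entries.
int64_t sizePerNode(const std::vector<int64_t>& shape)
{
  int64_t size = 1;
  for (size_t j = 1; j < shape.size(); ++j) {
    if (shape[j] > 0) {
      size *= shape[j];
    }
  }
  return size; // e.g. {-1, 7, 3} -> 21 elements per batch entry
}
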
@@ -271,7 +271,7 @@ std::vector<O> OrtModel::inference(std::vector<I>& input)
     inputTensor.emplace_back(Ort::Value::CreateTensor<I>(pImplOrt->memoryInfo, input.data(), input.size(), inputShape.data(), inputShape.size()));
   }
   // input.clear();
-  auto outputTensors = (pImplOrt->session)->Run(pImplOrt->runOptions, inputNamesChar.data(), inputTensor.data(), inputTensor.size(), outputNamesChar.data(), outputNamesChar.size());
+  auto outputTensors = (pImplOrt->session)->Run(pImplOrt->runOptions, mInputNamesChar.data(), inputTensor.data(), inputTensor.size(), mOutputNamesChar.data(), mOutputNamesChar.size());
   O* outputValues = outputTensors[0].template GetTensorMutableData<O>();
   std::vector<O> outputValuesVec{outputValues, outputValues + inputShape[0] * mOutputShapes[0][1]};
   outputTensors.clear();
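
Given the signature std::vector<O> inference(std::vector<I>&), a caller flattens the batch into one vector and receives a flat output of batch-size times mOutputShapes[0][1] values. Assuming a float instantiation of this overload exists (only other instantiations are visible in this diff), usage would look roughly like:

// Hypothetical call of the single-vector overload; names are placeholders.
std::vector<float> flatInput(batchSize * elementsPerEntry); // filled with input features
std::vector<float> result = model.inference<float, float>(flatInput);
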
@@ -318,27 +318,27 @@ template void OrtModel::inference<float, float>(float*, int64_t, float*);
 template <class I, class O>
 void OrtModel::inference(I** input, int64_t input_size, O* output)
 {
-  std::vector<Ort::Value> inputTensors(inputShapesCopy.size());
+  std::vector<Ort::Value> inputTensors(mInputShapesCopy.size());
 
-  for (size_t i = 0; i < inputShapesCopy.size(); ++i) {
+  for (size_t i = 0; i < mInputShapesCopy.size(); ++i) {
 
-    inputShapesCopy[i][0] = input_size;  // batch-size
-    outputShapesCopy[i][0] = input_size; // batch-size
+    mInputShapesCopy[i][0] = input_size;  // batch-size
+    mOutputShapesCopy[i][0] = input_size; // batch-size
 
     if constexpr (std::is_same_v<I, OrtDataType::Float16_t>) {
       inputTensors[i] = Ort::Value::CreateTensor<Ort::Float16_t>(
         pImplOrt->memoryInfo,
         reinterpret_cast<Ort::Float16_t*>(input[i]),
-        inputSizePerNode[i] * input_size,
-        inputShapesCopy[i].data(),
-        inputShapesCopy[i].size());
+        mInputSizePerNode[i] * input_size,
+        mInputShapesCopy[i].data(),
+        mInputShapesCopy[i].size());
     } else {
       inputTensors[i] = Ort::Value::CreateTensor<I>(
         pImplOrt->memoryInfo,
         input[i],
-        inputSizePerNode[i] * input_size,
-        inputShapesCopy[i].data(),
-        inputShapesCopy[i].size());
+        mInputSizePerNode[i] * input_size,
+        mInputShapesCopy[i].data(),
+        mInputShapesCopy[i].size());
     }
   }
 
@@ -347,27 +347,27 @@ void OrtModel::inference(I** input, int64_t input_size, O* output)
     outputTensor = Ort::Value::CreateTensor<Ort::Float16_t>(
       pImplOrt->memoryInfo,
       reinterpret_cast<Ort::Float16_t*>(output),
-      outputSizePerNode[0] * input_size, // assumes that there is only one output node
-      outputShapesCopy[0].data(),
-      outputShapesCopy[0].size());
+      mOutputSizePerNode[0] * input_size, // assumes that there is only one output node
+      mOutputShapesCopy[0].data(),
+      mOutputShapesCopy[0].size());
   } else {
     outputTensor = Ort::Value::CreateTensor<O>(
      pImplOrt->memoryInfo,
      output,
-      outputSizePerNode[0] * input_size, // assumes that there is only one output node
-      outputShapesCopy[0].data(),
-      outputShapesCopy[0].size());
+      mOutputSizePerNode[0] * input_size, // assumes that there is only one output node
+      mOutputShapesCopy[0].data(),
+      mOutputShapesCopy[0].size());
   }
 
   // === Run inference ===
   pImplOrt->session->Run(
     pImplOrt->runOptions,
-    inputNamesChar.data(),
+    mInputNamesChar.data(),
     inputTensors.data(),
-    inputNamesChar.size(),
-    outputNamesChar.data(),
+    mInputNamesChar.size(),
+    mOutputNamesChar.data(),
     &outputTensor,
-    outputNamesChar.size());
+    mOutputNamesChar.size());
 }
 
 template void OrtModel::inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(OrtDataType::Float16_t**, int64_t, OrtDataType::Float16_t*);
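
The Float16_t instantiation above takes one raw buffer per input node plus a pre-allocated output buffer, so a call might look like this; buffer names and sizes are placeholders:

// Hypothetical call of the buffer-based overload instantiated above.
// nodeData must hold batchSize * mInputSizePerNode[0] values; outputBuffer
// must hold batchSize * mOutputSizePerNode[0] values.
OrtDataType::Float16_t* inputBuffers[1] = {nodeData}; // one pointer per input node
model.inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(inputBuffers, batchSize, outputBuffer);
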
@@ -382,37 +382,37 @@ std::vector<O> OrtModel::inference(std::vector<std::vector<I>>& inputs)
 
   for (size_t i = 0; i < inputs.size(); ++i) {
 
-    inputShapesCopy[i][0] = inputs[i].size() / inputSizePerNode[i]; // batch-size
+    mInputShapesCopy[i][0] = inputs[i].size() / mInputSizePerNode[i]; // batch-size
 
     if constexpr (std::is_same_v<I, OrtDataType::Float16_t>) {
       input_tensors.emplace_back(
         Ort::Value::CreateTensor<Ort::Float16_t>(
           pImplOrt->memoryInfo,
           reinterpret_cast<Ort::Float16_t*>(inputs[i].data()),
-          inputSizePerNode[i] * inputShapesCopy[i][0],
-          inputShapesCopy[i].data(),
-          inputShapesCopy[i].size()));
+          mInputSizePerNode[i] * mInputShapesCopy[i][0],
+          mInputShapesCopy[i].data(),
+          mInputShapesCopy[i].size()));
     } else {
       input_tensors.emplace_back(
         Ort::Value::CreateTensor<I>(
           pImplOrt->memoryInfo,
           inputs[i].data(),
-          inputSizePerNode[i] * inputShapesCopy[i][0],
-          inputShapesCopy[i].data(),
-          inputShapesCopy[i].size()));
+          mInputSizePerNode[i] * mInputShapesCopy[i][0],
+          mInputShapesCopy[i].data(),
+          mInputShapesCopy[i].size()));
     }
   }
 
-  int32_t totalOutputSize = mOutputsTotal * inputShapesCopy[0][0];
+  int32_t totalOutputSize = mOutputsTotal * mInputShapesCopy[0][0];
 
   // === Run inference ===
   auto output_tensors = pImplOrt->session->Run(
     pImplOrt->runOptions,
-    inputNamesChar.data(),
+    mInputNamesChar.data(),
     input_tensors.data(),
     input_tensors.size(),
-    outputNamesChar.data(),
-    outputNamesChar.size());
+    mOutputNamesChar.data(),
+    mOutputNamesChar.size());
 
   // === Extract output values ===
   O* output_data = output_tensors[0].template GetTensorMutableData<O>();