Commit 7175de4
Fixing member variable naming (#14217)
* Fixing member variable naming
* Please consider the following formatting changes
* Changing to mPImplOrt

Co-authored-by: ALICE Action Bot <alibuild@cern.ch>
1 parent 72b50c6 commit 7175de4

8 files changed: +444 −446 lines

Common/ML/include/ML/OrtInterface.h

Lines changed: 15 additions & 15 deletions
@@ -70,23 +70,23 @@ class OrtModel
   Ort::SessionOptions* getSessionOptions();
   Ort::MemoryInfo* getMemoryInfo();
   Ort::Env* getEnv();
-  int32_t getIntraOpNumThreads() const { return intraOpNumThreads; }
-  int32_t getInterOpNumThreads() const { return interOpNumThreads; }
+  int32_t getIntraOpNumThreads() const { return mIntraOpNumThreads; }
+  int32_t getInterOpNumThreads() const { return mInterOpNumThreads; }

   // Setters
-  void setDeviceId(int32_t id) { deviceId = id; }
+  void setDeviceId(int32_t id) { mDeviceId = id; }
   void setIO();
-  void setActiveThreads(int threads) { intraOpNumThreads = threads; }
+  void setActiveThreads(int threads) { mIntraOpNumThreads = threads; }
   void setIntraOpNumThreads(int threads)
   {
-    if (deviceType == "CPU") {
-      intraOpNumThreads = threads;
+    if (mDeviceType == "CPU") {
+      mIntraOpNumThreads = threads;
     }
   }
   void setInterOpNumThreads(int threads)
   {
-    if (deviceType == "CPU") {
-      interOpNumThreads = threads;
+    if (mDeviceType == "CPU") {
+      mInterOpNumThreads = threads;
     }
   }
   void setEnv(Ort::Env*);

@@ -113,19 +113,19 @@ class OrtModel
 private:
   // ORT variables -> need to be hidden as pImpl
   struct OrtVariables;
-  OrtVariables* pImplOrt;
+  OrtVariables* mPImplOrt;

   // Input & Output specifications of the loaded network
-  std::vector<const char*> inputNamesChar, outputNamesChar;
+  std::vector<const char*> mInputNamesChar, mOutputNamesChar;
   std::vector<std::string> mInputNames, mOutputNames;
-  std::vector<std::vector<int64_t>> mInputShapes, mOutputShapes, inputShapesCopy, outputShapesCopy; // Input shapes
-  std::vector<int64_t> inputSizePerNode, outputSizePerNode; // Output shapes
-  int32_t mInputsTotal = 0, mOutputsTotal = 0; // Total number of inputs and outputs
+  std::vector<std::vector<int64_t>> mInputShapes, mOutputShapes, mInputShapesCopy, mOutputShapesCopy; // Input shapes
+  std::vector<int64_t> mInputSizePerNode, mOutputSizePerNode; // Output shapes
+  int32_t mInputsTotal = 0, mOutputsTotal = 0; // Total number of inputs and outputs

   // Environment settings
   bool mInitialized = false;
-  std::string modelPath, envName = "", deviceType = "CPU", thread_affinity = ""; // device options should be cpu, rocm, migraphx, cuda
-  int32_t intraOpNumThreads = 1, interOpNumThreads = 1, deviceId = -1, enableProfiling = 0, loggingLevel = 0, allocateDeviceMemory = 0, enableOptimizations = 0;
+  std::string mModelPath, mEnvName = "", mDeviceType = "CPU", mThreadAffinity = ""; // device options should be cpu, rocm, migraphx, cuda
+  int32_t mIntraOpNumThreads = 1, mInterOpNumThreads = 1, mDeviceId = -1, mEnableProfiling = 0, mLoggingLevel = 0, mAllocateDeviceMemory = 0, mEnableOptimizations = 0;

   std::string printShape(const std::vector<int64_t>&);
   std::string printShape(const std::vector<std::vector<int64_t>>&, std::vector<std::string>&);
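For context, the convention this diff applies prefixes non-static data members with `m`, so members stay visually distinct from constructor parameters and locals and cannot be shadowed by setter arguments. A minimal sketch of the pattern, including the forward-declared pImpl struct; the class and member names here are hypothetical and only illustrate the style, they are not part of the commit:

```cpp
#include <cstdint>
#include <string>

// Hypothetical class illustrating the m-prefix naming and pImpl layout seen in OrtInterface.h.
class Model
{
 public:
  // The parameter name no longer collides with the m-prefixed member.
  void setDeviceId(int32_t deviceId) { mDeviceId = deviceId; }
  int32_t getDeviceId() const { return mDeviceId; }

 private:
  struct Impl;             // forward-declared implementation (pImpl) so heavy headers stay out of this one
  Impl* mPImpl = nullptr;  // raw pointer kept only for illustration; ownership is managed elsewhere
  int32_t mDeviceId = -1;
  std::string mDeviceType = "CPU";
};
```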

Common/ML/src/OrtInterface.cxx

Lines changed: 125 additions & 125 deletions
Large diffs are not rendered by default.

GPU/GPUTracking/Global/GPUChainTrackingClusterizer.cxx

Lines changed: 86 additions & 86 deletions
Large diffs are not rendered by default.

GPU/GPUTracking/TPCClusterFinder/GPUTPCNNClusterizer.cxx

Lines changed: 39 additions & 39 deletions
@@ -25,69 +25,69 @@ void GPUTPCNNClusterizer::SetMaxData(const GPUTrackingInOutPointers& io) {}

 void* GPUTPCNNClusterizer::setIOPointers(void* mem)
 {
-  if (nnClusterizerBatchedMode > 0) {
-    if (nnInferenceInputDType == 0 && nnClusterizerElementSize > 0) {
-      computePointerWithAlignment(mem, inputData_16, nnClusterizerBatchedMode * nnClusterizerElementSize);
-    } else if (nnInferenceInputDType == 1 && nnClusterizerElementSize > 0) {
-      computePointerWithAlignment(mem, inputData_32, nnClusterizerBatchedMode * nnClusterizerElementSize);
+  if (mNnClusterizerBatchedMode > 0) {
+    if (mNnInferenceInputDType == 0 && mNnClusterizerElementSize > 0) {
+      computePointerWithAlignment(mem, mInputData_16, mNnClusterizerBatchedMode * mNnClusterizerElementSize);
+    } else if (mNnInferenceInputDType == 1 && mNnClusterizerElementSize > 0) {
+      computePointerWithAlignment(mem, mInputData_32, mNnClusterizerBatchedMode * mNnClusterizerElementSize);
     }
-    computePointerWithAlignment(mem, clusterFlags, 2 * nnClusterizerBatchedMode);
+    computePointerWithAlignment(mem, mClusterFlags, 2 * mNnClusterizerBatchedMode);

-    if (nnInferenceOutputDType == 0 && nnClusterizerElementSize > 0) {
-      if (nnClusterizerModelClassNumOutputNodes > 0) {
-        computePointerWithAlignment(mem, modelProbabilities_16, nnClusterizerBatchedMode * nnClusterizerModelClassNumOutputNodes);
+    if (mNnInferenceOutputDType == 0 && mNnClusterizerElementSize > 0) {
+      if (mNnClusterizerModelClassNumOutputNodes > 0) {
+        computePointerWithAlignment(mem, mModelProbabilities_16, mNnClusterizerBatchedMode * mNnClusterizerModelClassNumOutputNodes);
       }
-      if (!nnClusterizerUseCfRegression) {
-        if (nnClusterizerModelReg1NumOutputNodes > 0) {
-          computePointerWithAlignment(mem, outputDataReg1_16, nnClusterizerBatchedMode * nnClusterizerModelReg1NumOutputNodes);
+      if (!mNnClusterizerUseCfRegression) {
+        if (mNnClusterizerModelReg1NumOutputNodes > 0) {
+          computePointerWithAlignment(mem, mOutputDataReg1_16, mNnClusterizerBatchedMode * mNnClusterizerModelReg1NumOutputNodes);
         }
-        if (nnClusterizerModelReg2NumOutputNodes > 0) {
-          computePointerWithAlignment(mem, outputDataReg2_16, nnClusterizerBatchedMode * nnClusterizerModelReg2NumOutputNodes);
+        if (mNnClusterizerModelReg2NumOutputNodes > 0) {
+          computePointerWithAlignment(mem, mOutputDataReg2_16, mNnClusterizerBatchedMode * mNnClusterizerModelReg2NumOutputNodes);
         }
       }
-    } else if (nnInferenceOutputDType == 1 && nnClusterizerElementSize > 0) {
-      if (nnClusterizerModelClassNumOutputNodes > 0) {
-        computePointerWithAlignment(mem, modelProbabilities_32, nnClusterizerBatchedMode * nnClusterizerModelClassNumOutputNodes);
+    } else if (mNnInferenceOutputDType == 1 && mNnClusterizerElementSize > 0) {
+      if (mNnClusterizerModelClassNumOutputNodes > 0) {
+        computePointerWithAlignment(mem, mModelProbabilities_32, mNnClusterizerBatchedMode * mNnClusterizerModelClassNumOutputNodes);
       }
-      if (!nnClusterizerUseCfRegression) {
-        if (nnClusterizerModelReg1NumOutputNodes > 0) {
-          computePointerWithAlignment(mem, outputDataReg1_32, nnClusterizerBatchedMode * nnClusterizerModelReg1NumOutputNodes);
+      if (!mNnClusterizerUseCfRegression) {
+        if (mNnClusterizerModelReg1NumOutputNodes > 0) {
+          computePointerWithAlignment(mem, mOutputDataReg1_32, mNnClusterizerBatchedMode * mNnClusterizerModelReg1NumOutputNodes);
         }
-        if (nnClusterizerModelReg2NumOutputNodes > 0) {
-          computePointerWithAlignment(mem, outputDataReg2_32, nnClusterizerBatchedMode * nnClusterizerModelReg2NumOutputNodes);
+        if (mNnClusterizerModelReg2NumOutputNodes > 0) {
+          computePointerWithAlignment(mem, mOutputDataReg2_32, mNnClusterizerBatchedMode * mNnClusterizerModelReg2NumOutputNodes);
         }
       }
     }
   }
-  if (nnClusterizerTotalClusters > 0) {
-    computePointerWithAlignment(mem, outputDataClass, nnClusterizerTotalClusters);
+  if (mNnClusterizerTotalClusters > 0) {
+    computePointerWithAlignment(mem, mOutputDataClass, mNnClusterizerTotalClusters);
   }
   return mem;
 }

 // std::vector<int32_t> GPUTPCNNClusterizer::pointerSizes() {
 //   std::vector<int32_t> sizes(7, -1);
-//   if (nnClusterizerBatchedMode > 0) {
-//     if (nnInferenceInputDType == 0 && nnClusterizerElementSize > 0) {
-//       sizes[0] = nnClusterizerBatchedMode * nnClusterizerElementSize; // inputData16
-//     } else if (nnInferenceInputDType == 1 && nnClusterizerElementSize > 0) {
-//       sizes[1] = nnClusterizerBatchedMode * nnClusterizerElementSize; // inputData32
+//   if (mNnClusterizerBatchedMode > 0) {
+//     if (mNnInferenceInputDType == 0 && mNnClusterizerElementSize > 0) {
+//       sizes[0] = mNnClusterizerBatchedMode * mNnClusterizerElementSize; // inputData16
+//     } else if (mNnInferenceInputDType == 1 && mNnClusterizerElementSize > 0) {
+//       sizes[1] = mNnClusterizerBatchedMode * mNnClusterizerElementSize; // inputData32
 //     }
-//     sizes[2] = 2 * nnClusterizerBatchedMode; // clusterFlags
-//     if (nnClusterizerModelClassNumOutputNodes > 0) {
-//       sizes[3] = nnClusterizerBatchedMode * nnClusterizerModelClassNumOutputNodes; // modelProbabilities
+//     sizes[2] = 2 * mNnClusterizerBatchedMode; // mClusterFlags
+//     if (mNnClusterizerModelClassNumOutputNodes > 0) {
+//       sizes[3] = mNnClusterizerBatchedMode * mNnClusterizerModelClassNumOutputNodes; // modelProbabilities
 //     }
-//     if (!nnClusterizerUseCfRegression) {
-//       if (nnClusterizerModelReg1NumOutputNodes > 0) {
-//         sizes[4] = nnClusterizerBatchedMode * nnClusterizerModelReg1NumOutputNodes; // outputDataReg1
+//     if (!mNnClusterizerUseCfRegression) {
+//       if (mNnClusterizerModelReg1NumOutputNodes > 0) {
+//         sizes[4] = mNnClusterizerBatchedMode * mNnClusterizerModelReg1NumOutputNodes; // outputDataReg1
 //       }
-//       if (nnClusterizerModelReg2NumOutputNodes > 0) {
-//         sizes[5] = nnClusterizerBatchedMode * nnClusterizerModelReg2NumOutputNodes; // outputDataReg2
+//       if (mNnClusterizerModelReg2NumOutputNodes > 0) {
+//         sizes[5] = mNnClusterizerBatchedMode * mNnClusterizerModelReg2NumOutputNodes; // outputDataReg2
 //       }
 //     }
 //   }
-//   if (nnClusterizerTotalClusters > 0) {
-//     sizes[6] = nnClusterizerTotalClusters; // outputDataClass
+//   if (mNnClusterizerTotalClusters > 0) {
+//     sizes[6] = mNnClusterizerTotalClusters; // mOutputDataClass
 //   }
 //   return sizes;
 // }
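setIOPointers follows the GPUProcessor pattern of carving typed, aligned buffers out of one pre-allocated arena: each call points a member at the current cursor and advances the cursor by the requested element count. A rough standalone sketch of what such a helper does; the name assignAlignedPointer and the 64-byte alignment are assumptions for illustration, the real computePointerWithAlignment belongs to the GPU tracking framework and may differ in signature and behavior:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for computePointerWithAlignment: round the running
// cursor up to the alignment, bind `ptr` to that address, and return the
// cursor advanced past `count` elements of T.
template <size_t Alignment = 64, typename T>
void* assignAlignedPointer(void* mem, T*& ptr, size_t count)
{
  auto addr = reinterpret_cast<uintptr_t>(mem);
  addr = (addr + Alignment - 1) / Alignment * Alignment; // round up to the alignment boundary
  ptr = reinterpret_cast<T*>(addr);
  return reinterpret_cast<void*>(addr + count * sizeof(T));
}
```

Because the same cursor is threaded through every call in setIOPointers, the sizes requested here have to match the sizes assumed when the arena itself was allocated (the commented-out pointerSizes sketch above mirrors exactly that bookkeeping).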

GPU/GPUTracking/TPCClusterFinder/GPUTPCNNClusterizer.h

Lines changed: 28 additions & 28 deletions
@@ -37,42 +37,42 @@ class GPUTPCNNClusterizer : public GPUProcessor

   // Neural network clusterization

-  int nnClusterizerSizeInputRow = 3;
-  int nnClusterizerSizeInputPad = 3;
-  int nnClusterizerSizeInputTime = 3;
-  int nnClusterizerElementSize = -1;
-  bool nnClusterizerAddIndexData = true;
-  float nnClassThreshold = 0.01;
-  bool nnSigmoidTrafoClassThreshold = 1;
-  int nnClusterizerUseCfRegression = 0;
-  int nnClusterizerBatchedMode = 1;
-  int nnClusterizerTotalClusters = 1;
-  int nnClusterizerVerbosity = 0;
-  int nnClusterizerBoundaryFillValue = -1;
-  int nnClusterizerModelClassNumOutputNodes = -1;
-  int nnClusterizerModelReg1NumOutputNodes = -1;
-  int nnClusterizerModelReg2NumOutputNodes = -1;
-  int nnInferenceInputDType = 0;  // 0: float16, 1: float32
-  int nnInferenceOutputDType = 0; // 0: float16, 1: float32
+  int mNnClusterizerSizeInputRow = 3;
+  int mNnClusterizerSizeInputPad = 3;
+  int mNnClusterizerSizeInputTime = 3;
+  int mNnClusterizerElementSize = -1;
+  bool mNnClusterizerAddIndexData = true;
+  float mNnClassThreshold = 0.01;
+  bool mNnSigmoidTrafoClassThreshold = 1;
+  int mNnClusterizerUseCfRegression = 0;
+  int mNnClusterizerBatchedMode = 1;
+  int mNnClusterizerTotalClusters = 1;
+  int mNnClusterizerVerbosity = 0;
+  int mNnClusterizerBoundaryFillValue = -1;
+  int mNnClusterizerModelClassNumOutputNodes = -1;
+  int mNnClusterizerModelReg1NumOutputNodes = -1;
+  int mNnClusterizerModelReg2NumOutputNodes = -1;
+  int mNnInferenceInputDType = 0;  // 0: float16, 1: float32
+  int mNnInferenceOutputDType = 0; // 0: float16, 1: float32
   int mISector = -1;
-  int deviceId = -1;
+  int mDeviceId = -1;

   // Memory allocation for neural network

-  bool* clusterFlags = nullptr; // mSplitInTime, mSplitInPad. Techincally both flags are set in the same way -> ClusterAccumulator.cx=nullptr
-  int* outputDataClass = nullptr;
+  bool* mClusterFlags = nullptr; // mSplitInTime, mSplitInPad. Techincally both flags are set in the same way -> ClusterAccumulator.cx=nullptr
+  int* mOutputDataClass = nullptr;

   // FP32
-  float* inputData_32 = nullptr;
-  float* modelProbabilities_32 = nullptr;
-  float* outputDataReg1_32 = nullptr;
-  float* outputDataReg2_32 = nullptr;
+  float* mInputData_32 = nullptr;
+  float* mModelProbabilities_32 = nullptr;
+  float* mOutputDataReg1_32 = nullptr;
+  float* mOutputDataReg2_32 = nullptr;

   // FP16
-  OrtDataType::Float16_t* inputData_16 = nullptr;
-  OrtDataType::Float16_t* modelProbabilities_16 = nullptr;
-  OrtDataType::Float16_t* outputDataReg1_16 = nullptr;
-  OrtDataType::Float16_t* outputDataReg2_16 = nullptr;
+  OrtDataType::Float16_t* mInputData_16 = nullptr;
+  OrtDataType::Float16_t* mModelProbabilities_16 = nullptr;
+  OrtDataType::Float16_t* mOutputDataReg1_16 = nullptr;
+  OrtDataType::Float16_t* mOutputDataReg2_16 = nullptr;

   int16_t mMemoryId = -1;
 }; // class GPUTPCNNClusterizer
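The three mNnClusterizerSizeInput* members are half-window sizes around the cluster peak in row, pad, and time; together with mNnClusterizerAddIndexData they determine the per-cluster input length later stored in mNnClusterizerElementSize by GPUTPCNNClusterizerHost::initClusterizer (shown further below). A small self-contained illustration of that formula with the defaults from this header; the helper name elementSize is hypothetical, only the arithmetic comes from the commit:

```cpp
#include <cassert>

// Per-cluster NN input length: a (2r+1) x (2p+1) x (2t+1) window,
// optionally followed by 3 extra index values.
constexpr int elementSize(int r, int p, int t, bool addIndexData)
{
  return (2 * r + 1) * (2 * p + 1) * (2 * t + 1) + (addIndexData ? 3 : 0);
}

int main()
{
  // Defaults from GPUTPCNNClusterizer.h: r = p = t = 3, index data enabled.
  static_assert(elementSize(3, 3, 3, true) == 346, "7*7*7 window plus 3 index entries");
  assert(elementSize(3, 3, 3, false) == 343);
  return 0;
}
```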

GPU/GPUTracking/TPCClusterFinder/GPUTPCNNClusterizerHost.cxx

Lines changed: 41 additions & 41 deletions
@@ -45,7 +45,7 @@ void GPUTPCNNClusterizerHost::init(const GPUSettingsProcessingNNclusterizer& set
     }
   }

-  OrtOptions = {
+  mOrtOptions = {
     {"model-path", class_model_path},
     {"device-type", settings.nnInferenceDevice},
     {"allocate-device-memory", std::to_string(settings.nnInferenceAllocateDevMem)},
@@ -57,60 +57,60 @@ void GPUTPCNNClusterizerHost::init(const GPUSettingsProcessingNNclusterizer& set
     {"logging-level", std::to_string(settings.nnInferenceVerbosity)},
     {"onnx-environment-name", "c1"}};

-  model_class.initOptions(OrtOptions);
-  modelsUsed[0] = true;
+  mModelClass.initOptions(mOrtOptions);
+  mModelsUsed[0] = true;

   reg_model_paths_local = o2::utils::Str::tokenize(reg_model_path, ':');

   if (!settings.nnClusterizerUseCfRegression) {
     if (reg_model_paths_local.size() == 1) {
-      OrtOptions["model-path"] = reg_model_paths_local[0];
-      OrtOptions["onnx-environment-name"] = "r1";
-      model_reg_1.initOptions(OrtOptions);
-      modelsUsed[1] = true;
+      mOrtOptions["model-path"] = reg_model_paths_local[0];
+      mOrtOptions["onnx-environment-name"] = "r1";
+      mModelReg1.initOptions(mOrtOptions);
+      mModelsUsed[1] = true;
     } else {
-      OrtOptions["model-path"] = reg_model_paths_local[0];
-      OrtOptions["onnx-environment-name"] = "r1";
-      model_reg_1.initOptions(OrtOptions);
-      modelsUsed[1] = true;
-      OrtOptions["model-path"] = reg_model_paths_local[1];
-      OrtOptions["onnx-environment-name"] = "r2";
-      model_reg_2.initOptions(OrtOptions);
-      modelsUsed[2] = true;
+      mOrtOptions["model-path"] = reg_model_paths_local[0];
+      mOrtOptions["onnx-environment-name"] = "r1";
+      mModelReg1.initOptions(mOrtOptions);
+      mModelsUsed[1] = true;
+      mOrtOptions["model-path"] = reg_model_paths_local[1];
+      mOrtOptions["onnx-environment-name"] = "r2";
+      mModelReg2.initOptions(mOrtOptions);
+      mModelsUsed[2] = true;
     }
   }
 }

 void GPUTPCNNClusterizerHost::initClusterizer(const GPUSettingsProcessingNNclusterizer& settings, GPUTPCNNClusterizer& clustererNN)
 {
-  clustererNN.nnClusterizerUseCfRegression = settings.nnClusterizerUseCfRegression;
-  clustererNN.nnClusterizerSizeInputRow = settings.nnClusterizerSizeInputRow;
-  clustererNN.nnClusterizerSizeInputPad = settings.nnClusterizerSizeInputPad;
-  clustererNN.nnClusterizerSizeInputTime = settings.nnClusterizerSizeInputTime;
-  clustererNN.nnClusterizerAddIndexData = settings.nnClusterizerAddIndexData;
-  clustererNN.nnClusterizerElementSize = ((2 * settings.nnClusterizerSizeInputRow + 1) * (2 * settings.nnClusterizerSizeInputPad + 1) * (2 * settings.nnClusterizerSizeInputTime + 1)) + (settings.nnClusterizerAddIndexData ? 3 : 0);
-  clustererNN.nnClusterizerBatchedMode = settings.nnClusterizerBatchedMode;
-  clustererNN.nnClusterizerBoundaryFillValue = settings.nnClusterizerBoundaryFillValue;
-  clustererNN.nnSigmoidTrafoClassThreshold = settings.nnSigmoidTrafoClassThreshold;
-  if (clustererNN.nnSigmoidTrafoClassThreshold) {
-    clustererNN.nnClassThreshold = (float)std::log(settings.nnClassThreshold / (1.f - settings.nnClassThreshold));
+  clustererNN.mNnClusterizerUseCfRegression = settings.nnClusterizerUseCfRegression;
+  clustererNN.mNnClusterizerSizeInputRow = settings.nnClusterizerSizeInputRow;
+  clustererNN.mNnClusterizerSizeInputPad = settings.nnClusterizerSizeInputPad;
+  clustererNN.mNnClusterizerSizeInputTime = settings.nnClusterizerSizeInputTime;
+  clustererNN.mNnClusterizerAddIndexData = settings.nnClusterizerAddIndexData;
+  clustererNN.mNnClusterizerElementSize = ((2 * settings.nnClusterizerSizeInputRow + 1) * (2 * settings.nnClusterizerSizeInputPad + 1) * (2 * settings.nnClusterizerSizeInputTime + 1)) + (settings.nnClusterizerAddIndexData ? 3 : 0);
+  clustererNN.mNnClusterizerBatchedMode = settings.nnClusterizerBatchedMode;
+  clustererNN.mNnClusterizerBoundaryFillValue = settings.nnClusterizerBoundaryFillValue;
+  clustererNN.mNnSigmoidTrafoClassThreshold = settings.nnSigmoidTrafoClassThreshold;
+  if (clustererNN.mNnSigmoidTrafoClassThreshold) {
+    clustererNN.mNnClassThreshold = (float)std::log(settings.nnClassThreshold / (1.f - settings.nnClassThreshold));
   } else {
-    clustererNN.nnClassThreshold = settings.nnClassThreshold;
+    clustererNN.mNnClassThreshold = settings.nnClassThreshold;
   }
   if (settings.nnClusterizerVerbosity < 0) {
-    clustererNN.nnClusterizerVerbosity = settings.nnInferenceVerbosity;
+    clustererNN.mNnClusterizerVerbosity = settings.nnInferenceVerbosity;
   } else {
-    clustererNN.nnClusterizerVerbosity = settings.nnClusterizerVerbosity;
+    clustererNN.mNnClusterizerVerbosity = settings.nnClusterizerVerbosity;
   }
-  clustererNN.nnInferenceInputDType = settings.nnInferenceInputDType.find("32") != std::string::npos;
-  clustererNN.nnInferenceOutputDType = settings.nnInferenceOutputDType.find("32") != std::string::npos;
-  clustererNN.nnClusterizerModelClassNumOutputNodes = model_class.getNumOutputNodes()[0][1];
+  clustererNN.mNnInferenceInputDType = settings.nnInferenceInputDType.find("32") != std::string::npos;
+  clustererNN.mNnInferenceOutputDType = settings.nnInferenceOutputDType.find("32") != std::string::npos;
+  clustererNN.mNnClusterizerModelClassNumOutputNodes = mModelClass.getNumOutputNodes()[0][1];
   if (!settings.nnClusterizerUseCfRegression) {
-    if (model_class.getNumOutputNodes()[0][1] == 1 || !model_reg_2.isInitialized()) {
-      clustererNN.nnClusterizerModelReg1NumOutputNodes = model_reg_1.getNumOutputNodes()[0][1];
+    if (mModelClass.getNumOutputNodes()[0][1] == 1 || !mModelReg2.isInitialized()) {
+      clustererNN.mNnClusterizerModelReg1NumOutputNodes = mModelReg1.getNumOutputNodes()[0][1];
     } else {
-      clustererNN.nnClusterizerModelReg1NumOutputNodes = model_reg_1.getNumOutputNodes()[0][1];
-      clustererNN.nnClusterizerModelReg2NumOutputNodes = model_reg_2.getNumOutputNodes()[0][1];
+      clustererNN.mNnClusterizerModelReg1NumOutputNodes = mModelReg1.getNumOutputNodes()[0][1];
+      clustererNN.mNnClusterizerModelReg2NumOutputNodes = mModelReg2.getNumOutputNodes()[0][1];
     }
   }
 }
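One detail worth noting in initClusterizer: when mNnSigmoidTrafoClassThreshold is set, the configured probability cut is mapped to logit space via log(p / (1 − p)), so the classifier's raw (pre-sigmoid) output can be compared against mNnClassThreshold directly, without evaluating a sigmoid per cluster. A minimal standalone sketch of that equivalence, not code from the commit:

```cpp
#include <cassert>
#include <cmath>

int main()
{
  const float p = 0.01f;                        // configured class threshold (probability)
  const float logitP = std::log(p / (1.f - p)); // same transform as in initClusterizer

  // Because the sigmoid is monotonic, comparing the raw network output
  // against logitP is equivalent to comparing sigmoid(raw) against p.
  const float raw = -3.5f; // some pre-sigmoid classifier output
  const float sig = 1.f / (1.f + std::exp(-raw));
  assert((raw > logitP) == (sig > p));
  return 0;
}
```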
@@ -199,20 +199,20 @@ void MockedOrtAllocator::LeakCheck()

 void GPUTPCNNClusterizerHost::volatileOrtAllocator(Ort::Env* env, Ort::MemoryInfo* memInfo, GPUReconstruction* rec, bool recreate)
 {
-  mockedAlloc = std::make_shared<MockedOrtAllocator>(rec, (OrtMemoryInfo*)(*memInfo));
+  mMockedAlloc = std::make_shared<MockedOrtAllocator>(rec, (OrtMemoryInfo*)(*memInfo));
   if (recreate) {
     Ort::ThrowOnError(Ort::GetApi().UnregisterAllocator((OrtEnv*)(*env), (OrtMemoryInfo*)(*memInfo)));
   }
-  Ort::ThrowOnError(Ort::GetApi().RegisterAllocator((OrtEnv*)(*env), mockedAlloc.get()));
-  memInfo = (Ort::MemoryInfo*)mockedAlloc->Info();
+  Ort::ThrowOnError(Ort::GetApi().RegisterAllocator((OrtEnv*)(*env), mMockedAlloc.get()));
+  memInfo = (Ort::MemoryInfo*)mMockedAlloc->Info();
 }

 const OrtMemoryInfo* GPUTPCNNClusterizerHost::getMockedMemoryInfo()
 {
-  return mockedAlloc->Info();
+  return mMockedAlloc->Info();
 }

 MockedOrtAllocator* GPUTPCNNClusterizerHost::getMockedAllocator()
 {
-  return mockedAlloc.get();
+  return mMockedAlloc.get();
 }
