Skip to content

Commit cedce21

Browse files
authored
[Tools/ML] model: Add const, pass by reference, add std prefix. (#12592)
1 parent f88c0a3 commit cedce21

File tree

2 files changed: +20 additions, −20 deletions

Tools/ML/model.cxx

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -42,17 +42,17 @@ namespace ml
4242
std::string OnnxModel::printShape(const std::vector<int64_t>& v)
4343
{
4444
std::stringstream ss("");
45-
for (size_t i = 0; i < v.size() - 1; i++)
45+
for (std::size_t i = 0; i < v.size() - 1; i++)
4646
ss << v[i] << "x";
4747
ss << v[v.size() - 1];
4848
return ss.str();
4949
}
5050

51-
bool OnnxModel::checkHyperloop(bool verbose)
51+
bool OnnxModel::checkHyperloop(const bool verbose)
5252
{
5353
/// Testing hyperloop core settings
5454
const char* alienCores = gSystem->Getenv("ALIEN_JDL_CPUCORES");
55-
bool alienCoresFound = (alienCores != NULL);
55+
const bool alienCoresFound = (alienCores != NULL);
5656
if (alienCoresFound) {
5757
if (verbose) {
5858
LOGP(info, "Hyperloop test/Grid job detected! Number of cores = {}. Setting threads anyway to 1.", alienCores);
@@ -68,7 +68,7 @@ bool OnnxModel::checkHyperloop(bool verbose)
6868
return alienCoresFound;
6969
}
7070

71-
void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int threads, uint64_t from, uint64_t until)
71+
void OnnxModel::initModel(const std::string& localPath, const bool enableOptimizations, const int threads, const uint64_t from, const uint64_t until)
7272
{
7373

7474
assert(from <= until);
@@ -90,26 +90,26 @@ void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int t
9090
mEnv = std::make_shared<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "onnx-model");
9191
mSession = std::make_shared<Ort::Session>(*mEnv, modelPath.c_str(), sessionOptions);
9292

93-
Ort::AllocatorWithDefaultOptions tmpAllocator;
94-
for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
93+
Ort::AllocatorWithDefaultOptions const tmpAllocator;
94+
for (std::size_t i = 0; i < mSession->GetInputCount(); ++i) {
9595
mInputNames.push_back(mSession->GetInputNameAllocated(i, tmpAllocator).get());
9696
}
97-
for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
97+
for (std::size_t i = 0; i < mSession->GetInputCount(); ++i) {
9898
mInputShapes.emplace_back(mSession->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
9999
}
100-
for (size_t i = 0; i < mSession->GetOutputCount(); ++i) {
100+
for (std::size_t i = 0; i < mSession->GetOutputCount(); ++i) {
101101
mOutputNames.push_back(mSession->GetOutputNameAllocated(i, tmpAllocator).get());
102102
}
103-
for (size_t i = 0; i < mSession->GetOutputCount(); ++i) {
103+
for (std::size_t i = 0; i < mSession->GetOutputCount(); ++i) {
104104
mOutputShapes.emplace_back(mSession->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
105105
}
106106
LOG(info) << "Input Nodes:";
107-
for (size_t i = 0; i < mInputNames.size(); i++) {
107+
for (std::size_t i = 0; i < mInputNames.size(); i++) {
108108
LOG(info) << "\t" << mInputNames[i] << " : " << printShape(mInputShapes[i]);
109109
}
110110

111111
LOG(info) << "Output Nodes:";
112-
for (size_t i = 0; i < mOutputNames.size(); i++) {
112+
for (std::size_t i = 0; i < mOutputNames.size(); i++) {
113113
LOG(info) << "\t" << mOutputNames[i] << " : " << printShape(mOutputShapes[i]);
114114
}
115115

@@ -121,7 +121,7 @@ void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int t
121121
LOG(info) << "--- Model initialized! ---";
122122
}
123123

124-
void OnnxModel::setActiveThreads(int threads)
124+
void OnnxModel::setActiveThreads(const int threads)
125125
{
126126
activeThreads = threads;
127127
if (!checkHyperloop(false)) {

Tools/ML/model.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ class OnnxModel
4747
~OnnxModel() = default;
4848

4949
// Inferencing
50-
void initModel(std::string, bool = false, int = 0, uint64_t = 0, uint64_t = 0);
50+
void initModel(const std::string&, const bool = false, const int = 0, const uint64_t = 0, const uint64_t = 0);
5151

5252
// template methods -- best to define them in header
5353
template <typename T>
@@ -57,7 +57,7 @@ class OnnxModel
5757
// assert(input[0].GetTensorTypeAndShapeInfo().GetShape() == getNumInputNodes()); --> Fails build in debug mode, TODO: assertion should be checked somehow
5858

5959
try {
60-
Ort::RunOptions runOptions;
60+
const Ort::RunOptions runOptions;
6161
std::vector<const char*> inputNamesChar(mInputNames.size(), nullptr);
6262
std::transform(std::begin(mInputNames), std::end(mInputNames), std::begin(inputNamesChar),
6363
[&](const std::string& str) { return str.c_str(); });
@@ -87,7 +87,7 @@ class OnnxModel
8787
template <typename T>
8888
T* evalModel(std::vector<T>& input)
8989
{
90-
int64_t size = input.size();
90+
const int64_t size = input.size();
9191
assert(size % mInputShapes[0][1] == 0);
9292
std::vector<int64_t> inputShape{size / mInputShapes[0][1], mInputShapes[0][1]};
9393
std::vector<Ort::Value> inputTensors;
@@ -106,16 +106,16 @@ class OnnxModel
106106

107107
Ort::MemoryInfo memInfo = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
108108

109-
for (size_t iinput = 0; iinput < input.size(); iinput++) {
109+
for (std::size_t iinput = 0; iinput < input.size(); iinput++) {
110110
[[maybe_unused]] int totalSize = 1;
111111
int64_t size = input[iinput].size();
112-
for (size_t idim = 1; idim < mInputShapes[iinput].size(); idim++) {
112+
for (std::size_t idim = 1; idim < mInputShapes[iinput].size(); idim++) {
113113
totalSize *= mInputShapes[iinput][idim];
114114
}
115115
assert(size % totalSize == 0);
116116

117117
std::vector<int64_t> inputShape{static_cast<int64_t>(size / totalSize)};
118-
for (size_t idim = 1; idim < mInputShapes[iinput].size(); idim++) {
118+
for (std::size_t idim = 1; idim < mInputShapes[iinput].size(); idim++) {
119119
inputShape.push_back(mInputShapes[iinput][idim]);
120120
}
121121

@@ -142,7 +142,7 @@ class OnnxModel
142142
int getNumOutputNodes() const { return mOutputShapes[0][1]; }
143143
uint64_t getValidityFrom() const { return validFrom; }
144144
uint64_t getValidityUntil() const { return validUntil; }
145-
void setActiveThreads(int);
145+
void setActiveThreads(const int);
146146

147147
private:
148148
// Environment variables for the ONNX runtime
@@ -164,7 +164,7 @@ class OnnxModel
164164

165165
// Internal function for printing the shape of tensors
166166
std::string printShape(const std::vector<int64_t>&);
167-
bool checkHyperloop(bool = true);
167+
bool checkHyperloop(const bool = true);
168168
};
169169

170170
} // namespace ml

0 commit comments

Comments (0)