
Commit f2d2b86

Adding deterministic mode (unfortunately that did not make it deterministic on GPU -> general problem with ONNX)
Parent: e6482ab

4 files changed: +10 additions, −2 deletions

Common/ML/include/ML/OrtInterface.h

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@ class OrtModel
 int32_t mInputsTotal = 0, mOutputsTotal = 0; // Total number of inputs and outputs

 // Environment settings
-bool mInitialized = false;
+bool mInitialized = false, mDeterministicMode = false;
 std::string mModelPath, mEnvName = "", mDeviceType = "CPU", mThreadAffinity = ""; // device options should be cpu, rocm, migraphx, cuda
 int32_t mIntraOpNumThreads = 1, mInterOpNumThreads = 1, mDeviceId = -1, mEnableProfiling = 0, mLoggingLevel = 0, mAllocateDeviceMemory = 0, mEnableOptimizations = 0;

Common/ML/src/OrtInterface.cxx

Lines changed: 7 additions & 1 deletion
@@ -68,8 +68,10 @@ void OrtModel::initOptions(std::unordered_map<std::string, std::string> optionsM
 mEnableProfiling = (optionsMap.contains("enable-profiling") ? std::stoi(optionsMap["enable-profiling"]) : 0);
 mEnableOptimizations = (optionsMap.contains("enable-optimizations") ? std::stoi(optionsMap["enable-optimizations"]) : 0);
 mEnvName = (optionsMap.contains("onnx-environment-name") ? optionsMap["onnx-environment-name"] : "onnx_model_inference");
+mDeterministicMode = (optionsMap.contains("deterministic-compute") ? std::stoi(optionsMap["deterministic-compute"]) : 0);

-if (mDeviceType == "CPU") {
+if (mDeviceType == "CPU")
+{
 (mPImplOrt->sessionOptions).SetIntraOpNumThreads(mIntraOpNumThreads);
 (mPImplOrt->sessionOptions).SetInterOpNumThreads(mInterOpNumThreads);
 if (mIntraOpNumThreads > 1 || mInterOpNumThreads > 1) {
@@ -99,6 +101,10 @@ void OrtModel::initOptions(std::unordered_map<std::string, std::string> optionsM
 (mPImplOrt->sessionOptions).DisableProfiling();
 }

+if (mDeterministicMode > 0) {
+  (mPImplOrt->sessionOptions).AddConfigEntry("session_options.use_deterministic_compute", "1");
+}
+
 (mPImplOrt->sessionOptions).SetGraphOptimizationLevel(GraphOptimizationLevel(mEnableOptimizations));
 (mPImplOrt->sessionOptions).SetLogSeverityLevel(OrtLoggingLevel(mLoggingLevel));

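For context, a minimal standalone sketch (not part of this commit) of the mechanism the new code path uses: setting the session_options.use_deterministic_compute config entry on an Ort::SessionOptions through the ONNX Runtime C++ API. The environment name and model path below are placeholders; as the commit message notes, ONNX Runtime only applies deterministic kernels where they exist, so GPU runs can still differ between executions.

#include <onnxruntime_cxx_api.h>

int main()
{
  // Placeholder environment name and logging level
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "onnx_model_inference");
  Ort::SessionOptions sessionOptions;

  // Same config entry that OrtModel::initOptions now adds; it requests
  // deterministic kernels where ONNX Runtime provides them, which is why
  // GPU execution may still be non-deterministic.
  sessionOptions.AddConfigEntry("session_options.use_deterministic_compute", "1");

  // Placeholder model path
  Ort::Session session(env, "model.onnx", sessionOptions);
  return 0;
}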
GPU/GPUTracking/Definitions/GPUSettingsList.h

Lines changed: 1 addition & 0 deletions
@@ -256,6 +256,7 @@ AddOption(nnInferenceOutputDType, std::string, "FP32", "", 0, "(std::string) Spe
 AddOption(nnInferenceIntraOpNumThreads, int, 1, "", 0, "Number of threads used to evaluate one neural network (ONNX: SetIntraOpNumThreads). 0 = auto-detect, can lead to problems on SLURM systems.")
 AddOption(nnInferenceInterOpNumThreads, int, 1, "", 0, "Number of threads used to evaluate one neural network (ONNX: SetInterOpNumThreads). 0 = auto-detect, can lead to problems on SLURM systems.")
 AddOption(nnInferenceEnableOrtOptimization, unsigned int, 99, "", 0, "Enables graph optimizations in ONNX Runtime. Can be [0, 1, 2, 99] -> see https://github.com/microsoft/onnxruntime/blob/3f71d637a83dc3540753a8bb06740f67e926dc13/include/onnxruntime/core/session/onnxruntime_c_api.h#L347")
+AddOption(nnInferenceUseDeterministicCompute, int, 0, "", 0, "Enables deterministic compute in ONNX Runtime where possible. Can be [0, 1] -> see https://github.com/microsoft/onnxruntime/blob/3b97d79b3c12dbf93aa0d563f345714596dc8ab6/onnxruntime/core/framework/session_options.h#L208")
 AddOption(nnInferenceOrtProfiling, int, 0, "", 0, "Enables profiling of model execution in ONNX Runtime")
 AddOption(nnInferenceOrtProfilingPath, std::string, ".", "", 0, "If nnInferenceOrtProfiling is set, the path to store the profiling data")
 AddOption(nnInferenceVerbosity, int, 1, "", 0, "0: No messages; 1: Warnings; 2: Warnings + major debugs; >3: All debugs")

GPU/GPUTracking/TPCClusterFinder/GPUTPCNNClusterizerHost.cxx

Lines changed: 1 addition & 0 deletions
@@ -54,6 +54,7 @@ void GPUTPCNNClusterizerHost::init(const GPUSettingsProcessingNNclusterizer& set
 {"intra-op-num-threads", std::to_string(settings.nnInferenceIntraOpNumThreads)},
 {"inter-op-num-threads", std::to_string(settings.nnInferenceInterOpNumThreads)},
 {"enable-optimizations", std::to_string(settings.nnInferenceEnableOrtOptimization)},
+{"deterministic-compute", std::to_string(settings.nnInferenceUseDeterministicCompute)}, // TODO: This unfortunately doesn't guarantee determinism (25.07.2025)
 {"enable-profiling", std::to_string(settings.nnInferenceOrtProfiling)},
 {"profiling-output-path", settings.nnInferenceOrtProfilingPath},
 {"logging-level", std::to_string(settings.nnInferenceVerbosity)},

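Taken together, the setting flows from GPUSettingsList.h through the string map built in GPUTPCNNClusterizerHost::init into OrtModel::initOptions. Below is a minimal sketch of that hand-off, with a hypothetical parse helper standing in for OrtModel; everything except the "deterministic-compute" key and the std::to_string/std::stoi round trip is illustrative. Requires C++20 for unordered_map::contains, matching the original code.

#include <string>
#include <unordered_map>

// Hypothetical stand-in for the parsing done in OrtModel::initOptions:
// the flag travels as a string and is converted back with std::stoi.
bool useDeterministicCompute(std::unordered_map<std::string, std::string> optionsMap)
{
  return optionsMap.contains("deterministic-compute") ? std::stoi(optionsMap["deterministic-compute"]) > 0 : false;
}

int main()
{
  int nnInferenceUseDeterministicCompute = 1; // would come from the GPU settings object

  // Same shape as the options map built in GPUTPCNNClusterizerHost::init
  std::unordered_map<std::string, std::string> options = {
    {"deterministic-compute", std::to_string(nnInferenceUseDeterministicCompute)},
  };

  // true -> AddConfigEntry("session_options.use_deterministic_compute", "1") on the session options
  return useDeterministicCompute(options) ? 0 : 1;
}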