Skip to content

Commit 815cc30

Browse files
committed
Modifying for David's comments
1 parent e830697 commit 815cc30

File tree

6 files changed

+62
-108
lines changed

6 files changed

+62
-108
lines changed

GPU/GPUTracking/CMakeLists.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -158,8 +158,7 @@ set(HDRS_INSTALL
158158
)
159159

160160
set(SRCS_NO_CINT ${SRCS_NO_CINT} display/GPUDisplayInterface.cxx)
161-
set(SRCS_NO_CINT
162-
${SRCS_NO_CINT}
161+
set(SRCS_NO_CINT ${SRCS_NO_CINT}
163162
Global/GPUChainITS.cxx
164163
ITS/GPUITSFitter.cxx
165164
ITS/GPUITSFitterKernels.cxx

GPU/GPUTracking/Definitions/GPUSettingsList.h

Lines changed: 29 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -221,6 +221,34 @@ AddOption(tpcTriggerHandling, bool, true, "", 0, "Enable TPC trigger handling")
221221
AddHelp("help", 'h')
222222
EndConfig()
223223

224+
BeginSubConfig(GPUSettingsProcessingNNclusterizer, nn, configStandalone.proc, "NN", 0, "Processing settings for neural network clusterizer", proc_nn)
225+
AddOption(applyNNclusterizer, int, 0, "", 0, "(bool, default = 0), if the neural network clusterizer should be used.")
226+
AddOption(nnInferenceDevice, std::string, "CPU", "", 0, "(std::string) Specify inference device (cpu (default), rocm, cuda)")
227+
AddOption(nnInferenceDeviceId, unsigned int, 0, "", 0, "(unsigned int) Specify inference device id")
228+
AddOption(nnInferenceAllocateDevMem, int, 0, "", 0, "(bool, default = 0), if the device memory should be allocated for inference")
229+
AddOption(nnInferenceDtype, std::string, "fp32", "", 0, "(std::string) Specify the datatype for which inference is performed (fp32: default, fp16)") // fp32 or fp16
230+
AddOption(nnInferenceThreadsPerNN, int, 0, "", 0, "Number of threads used to evaluate one neural network")
231+
AddOption(nnInferenceEnableOrtOptimization, unsigned int, 1, "", 0, "Enables graph optimizations in ONNX Runtime. Can be greater than 1!")
232+
AddOption(nnInferenceOrtProfiling, int, 0, "", 0, "Enables profiling of model execution in ONNX Runtime")
233+
AddOption(nnInferenceOrtProfilingPath, std::string, ".", "", 0, "If nnInferenceOrtProfiling is set, the path to store the profiling data")
234+
AddOption(nnInferenceVerbosity, int, 1, "", 0, "0: No messages; 1: Warnings; 2: Warnings + major debugs; >3: All debugs")
235+
AddOption(nnClusterizerAddIndexData, int, 1, "", 0, "If normalized index data (sector, row, pad), should be appended to the input")
236+
AddOption(nnClusterizerSizeInputRow, int, 3, "", 0, "Size of the input to the NN (currently calcualted as (length-1)/2")
237+
AddOption(nnClusterizerSizeInputPad, int, 3, "", 0, "Size of the input to the NN (currently calcualted as (length-1)/2")
238+
AddOption(nnClusterizerSizeInputTime, int, 3, "", 0, "Size of the input to the NN (currently calcualted as (length-1)/2")
239+
AddOption(nnClusterizerUseCfRegression, int, 0, "", 0, "(bool, default = false) If true, use the regression from the native clusterizer and not the NN")
240+
AddOption(nnClusterizerApplyCfDeconvolution, int, 0, "", 0, "Applies the CFDeconvolution kernel before the digits to the network are filled")
241+
AddOption(nnClusterizerBatchedMode, unsigned int, 1, "", 0, "(int, default = 1) If >1, the NN is evaluated on batched input of size specified in this variable")
242+
AddOption(nnClusterizerVerbosity, int, -1, "", 0, "(int, default = -1) If >0, logging messages of the clusterizer will be displayed")
243+
AddOption(nnClusterizerBoundaryFillValue, int, -1, "", 0, "Fill value for the boundary of the input to the NN")
244+
AddOption(nnClusterizerApplyNoiseSupression, int, 1, "", 0, "Applies the NoiseSupression kernel before the digits to the network are filled")
245+
AddOption(nnClassificationPath, std::string, "network_class.onnx", "", 0, "The classification network path")
246+
AddOption(nnClassThreshold, float, 0.5, "", 0, "The cutoff at which clusters will be accepted / rejected.")
247+
AddOption(nnRegressionPath, std::string, "network_reg.onnx", "", 0, "The regression network path")
248+
AddOption(nnSigmoidTrafoClassThreshold, int, 1, "", 0, "If true (default), then the classification threshold is transformed by an inverse sigmoid function. This depends on how the network was trained (with a sigmoid as acitvation function in the last layer or not).")
249+
AddHelp("help", 'h')
250+
EndConfig()
251+
224252
BeginSubConfig(GPUSettingsProcessing, proc, configStandalone, "PROC", 0, "Processing settings", proc)
225253
AddOption(platformNum, int32_t, -1, "", 0, "Platform to use, in case the backend provides multiple platforms (OpenCL only, -1 = auto-select)")
226254
AddOption(deviceNum, int32_t, -1, "gpuDevice", 0, "Set GPU device to use (-1: automatic, -2: for round-robin usage in timeslice-pipeline)")
@@ -298,30 +326,7 @@ AddOption(printSettings, bool, false, "", 0, "Print all settings when initializi
298326
AddVariable(eventDisplay, o2::gpu::GPUDisplayFrontendInterface*, nullptr)
299327
AddSubConfig(GPUSettingsProcessingRTC, rtc)
300328
AddSubConfig(GPUSettingsProcessingParam, param)
301-
AddOption(applyNNclusterizer, int, 0, "", 0, "(bool, default = 0), if the neural network clusterizer should be used.")
302-
AddOption(nnInferenceDevice, std::string, "CPU", "", 0, "(std::string) Specify inference device (cpu (default), rocm, cuda)")
303-
AddOption(nnInferenceDeviceId, unsigned int, 0, "", 0, "(unsigned int) Specify inference device id")
304-
AddOption(nnInferenceAllocateDevMem, int, 0, "", 0, "(bool, default = 0), if the device memory should be allocated for inference")
305-
AddOption(nnInferenceDtype, std::string, "fp32", "", 0, "(std::string) Specify the datatype for which inference is performed (fp32: default, fp16)") // fp32 or fp16
306-
AddOption(nnInferenceThreadsPerNN, int, 0, "", 0, "Number of threads used to evaluate one neural network")
307-
AddOption(nnInferenceEnableOrtOptimization, unsigned int, 1, "", 0, "Enables graph optimizations in ONNX Runtime. Can be greater than 1!")
308-
AddOption(nnInferenceOrtProfiling, int, 0, "", 0, "Enables profiling of model execution in ONNX Runtime")
309-
AddOption(nnInferenceOrtProfilingPath, std::string, ".", "", 0, "If nnInferenceOrtProfiling is set, the path to store the profiling data")
310-
AddOption(nnInferenceVerbosity, int, 1, "", 0, "0: No messages; 1: Warnings; 2: Warnings + major debugs; >3: All debugs")
311-
AddOption(nnClusterizerAddIndexData, int, 1, "", 0, "If normalized index data (sector, row, pad), should be appended to the input")
312-
AddOption(nnClusterizerSizeInputRow, int, 3, "", 0, "Size of the input to the NN (currently calcualted as (length-1)/2")
313-
AddOption(nnClusterizerSizeInputPad, int, 3, "", 0, "Size of the input to the NN (currently calcualted as (length-1)/2")
314-
AddOption(nnClusterizerSizeInputTime, int, 3, "", 0, "Size of the input to the NN (currently calcualted as (length-1)/2")
315-
AddOption(nnClusterizerUseCFregression, int, 0, "", 0, "(bool, default = false) If true, use the regression from the native clusterizer and not the NN")
316-
AddOption(nnClusterizerBatchedMode, unsigned int, 1, "", 0, "(int, default = 1) If >1, the NN is evaluated on batched input of size specified in this variable")
317-
AddOption(nnClusterizerVerbosity, int, -1, "", 0, "(int, default = -1) If >0, logging messages of the clusterizer will be displayed")
318-
AddOption(nnClusterizerBoundaryFillValue, int, -1, "", 0, "Fill value for the boundary of the input to the NN")
319-
AddOption(nnClusterizerApplyNoiseSupression, int, 1, "", 0, "Applies the NoiseSupression kernel before the digits to the network are filled")
320-
AddOption(nnClusterizerApplyCfDeconvolution, int, 0, "", 0, "Applies the CFDeconvolution kernel before the digits to the network are filled")
321-
AddOption(nnClassificationPath, std::string, "network_class.onnx", "", 0, "The classification network path")
322-
AddOption(nnClassThreshold, float, 0.5, "", 0, "The cutoff at which clusters will be accepted / rejected.")
323-
AddOption(nnRegressionPath, std::string, "network_reg.onnx", "", 0, "The regression network path")
324-
AddOption(nnSigmoidTrafoClassThreshold, int, 1, "", 0, "If true (default), then the classification threshold is transformed by an inverse sigmoid function. This depends on how the network was trained (with a sigmoid as acitvation function in the last layer or not).")
329+
AddSubConfig(GPUSettingsProcessingNNclusterizer, nn)
325330
AddHelp("help", 'h')
326331
EndConfig()
327332
#endif // __OPENCL__

GPU/GPUTracking/Global/GPUChainTrackingClusterizer.cxx

Lines changed: 27 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -889,23 +889,23 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput)
889889

890890
if (GetProcessingSettings().applyNNclusterizer) {
891891
// Settings for the clusterizer
892-
clusterer.nnClusterizerUseCFregression = GetProcessingSettings().nnClusterizerUseCFregression;
893-
clusterer.nnClusterizerSizeInputRow = GetProcessingSettings().nnClusterizerSizeInputRow;
894-
clusterer.nnClusterizerSizeInputPad = GetProcessingSettings().nnClusterizerSizeInputPad;
895-
clusterer.nnClusterizerSizeInputTime = GetProcessingSettings().nnClusterizerSizeInputTime;
896-
clusterer.nnClusterizerAddIndexData = GetProcessingSettings().nnClusterizerAddIndexData;
897-
clusterer.nnClusterizerElementSize = ((2 * clusterer.nnClusterizerSizeInputRow + 1) * (2 * clusterer.nnClusterizerSizeInputPad + 1) * (2 * clusterer.nnClusterizerSizeInputTime + 1)) + (clusterer.nnClusterizerAddIndexData ? 3 : 0);
898-
clusterer.nnClusterizerBatchedMode = GetProcessingSettings().nnClusterizerBatchedMode;
899-
clusterer.nnClusterizerBoundaryFillValue = GetProcessingSettings().nnClusterizerBoundaryFillValue;
892+
(clusterer.nnInternals)->nnClusterizerUseCfRegression = GetProcessingSettings().nnClusterizerUseCfRegression;
893+
(clusterer.nnInternals)->nnClusterizerSizeInputRow = GetProcessingSettings().nnClusterizerSizeInputRow;
894+
(clusterer.nnInternals)->nnClusterizerSizeInputPad = GetProcessingSettings().nnClusterizerSizeInputPad;
895+
(clusterer.nnInternals)->nnClusterizerSizeInputTime = GetProcessingSettings().nnClusterizerSizeInputTime;
896+
(clusterer.nnInternals)->nnClusterizerAddIndexData = GetProcessingSettings().nnClusterizerAddIndexData;
897+
(clusterer.nnInternals)->nnClusterizerElementSize = ((2 * (clusterer.nnInternals)->nnClusterizerSizeInputRow + 1) * (2 * (clusterer.nnInternals)->nnClusterizerSizeInputPad + 1) * (2 * (clusterer.nnInternals)->nnClusterizerSizeInputTime + 1)) + ((clusterer.nnInternals)->nnClusterizerAddIndexData ? 3 : 0);
898+
(clusterer.nnInternals)->nnClusterizerBatchedMode = GetProcessingSettings().nnClusterizerBatchedMode;
899+
(clusterer.nnInternals)->nnClusterizerBoundaryFillValue = GetProcessingSettings().nnClusterizerBoundaryFillValue;
900900
if (GetProcessingSettings().nnClusterizerVerbosity < 0){
901-
clusterer.nnClusterizerVerbosity = GetProcessingSettings().nnInferenceVerbosity;
901+
(clusterer.nnInternals)->nnClusterizerVerbosity = GetProcessingSettings().nnInferenceVerbosity;
902902
} else {
903-
clusterer.nnClusterizerVerbosity = GetProcessingSettings().nnClusterizerVerbosity;
903+
(clusterer.nnInternals)->nnClusterizerVerbosity = GetProcessingSettings().nnClusterizerVerbosity;
904904
}
905905

906906
// Settings for the NN evaluation
907-
clusterer.nnClassThreshold = GetProcessingSettings().nnClassThreshold;
908-
clusterer.nnSigmoidTrafoClassThreshold = GetProcessingSettings().nnSigmoidTrafoClassThreshold;
907+
(clusterer.nnInternals)->nnClassThreshold = GetProcessingSettings().nnClassThreshold;
908+
(clusterer.nnInternals)->nnSigmoidTrafoClassThreshold = GetProcessingSettings().nnSigmoidTrafoClassThreshold;
909909

910910
// Settings for the neural network evaluation
911911
clusterer.OrtOptions = {
@@ -922,7 +922,7 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput)
922922
clusterer.model_class.init(clusterer.OrtOptions);
923923
std::vector<std::string> reg_model_paths = o2::utils::Str::tokenize(GetProcessingSettings().nnRegressionPath, ':');
924924

925-
if (!clusterer.nnClusterizerUseCFregression) {
925+
if (!(clusterer.nnInternals)->nnClusterizerUseCfRegression) {
926926
if (clusterer.model_class.getNumOutputNodes()[0][1] == 1 || reg_model_paths.size() == 1) {
927927
clusterer.OrtOptions["model-path"] = reg_model_paths[0];
928928
clusterer.model_reg_1.init(clusterer.OrtOptions);
@@ -934,50 +934,50 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput)
934934
}
935935
}
936936

937-
if (clusterer.nnClusterizerUseCFregression || (int)(GetProcessingSettings().nnClusterizerApplyCfDeconvolution)) {
937+
if ((clusterer.nnInternals)->nnClusterizerUseCfRegression || (int)(GetProcessingSettings().nnClusterizerApplyCfDeconvolution)) {
938938
runKernel<GPUTPCCFDeconvolution>({GetGrid(clusterer.mPmemory->counters.nPositions, lane), {iSlice}});
939939
DoDebugAndDump(RecoStep::TPCClusterFinding, 262144 << 4, clusterer, &GPUTPCClusterFinder::DumpChargeMap, *mDebugFile, "Split Charges");
940940
}
941941

942-
if (clusterer.nnSigmoidTrafoClassThreshold) {
942+
if ((clusterer.nnInternals)->nnSigmoidTrafoClassThreshold) {
943943
// Inverse sigmoid transformation
944-
clusterer.nnClassThreshold = (float)std::log(clusterer.nnClassThreshold / (1.f - clusterer.nnClassThreshold));
944+
(clusterer.nnInternals)->nnClassThreshold = (float)std::log((clusterer.nnInternals)->nnClassThreshold / (1.f - (clusterer.nnInternals)->nnClassThreshold));
945945
}
946946

947947
float time_clusterizer = 0, time_fill = 0;
948948
int evalDtype = clusterer.OrtOptions["dtype"].find("32") != std::string::npos;
949949
clusterer.outputDataClass.resize(clusterer.mPmemory->counters.nClusters, -1);
950950

951-
for(int batch = 0; batch < std::ceil((float)clusterer.mPmemory->counters.nClusters / clusterer.nnClusterizerBatchedMode); batch++) {
952-
uint batchStart = batch * clusterer.nnClusterizerBatchedMode;
953-
uint iSize = CAMath::Min((uint)clusterer.nnClusterizerBatchedMode, (uint)(clusterer.mPmemory->counters.nClusters - batchStart));
951+
for(int batch = 0; batch < std::ceil((float)clusterer.mPmemory->counters.nClusters / (clusterer.nnInternals)->nnClusterizerBatchedMode); batch++) {
952+
uint batchStart = batch * (clusterer.nnInternals)->nnClusterizerBatchedMode;
953+
uint iSize = CAMath::Min((uint)(clusterer.nnInternals)->nnClusterizerBatchedMode, (uint)(clusterer.mPmemory->counters.nClusters - batchStart));
954954

955955
clusterer.peakPositions.resize(iSize);
956956
clusterer.centralCharges.resize(iSize);
957957

958958
if (evalDtype == 1) {
959-
clusterer.inputData32.resize(iSize * clusterer.nnClusterizerElementSize, (float)(clusterer.nnClusterizerBoundaryFillValue));
959+
clusterer.inputData32.resize(iSize * (clusterer.nnInternals)->nnClusterizerElementSize, (float)((clusterer.nnInternals)->nnClusterizerBoundaryFillValue));
960960
} else {
961-
clusterer.inputData16.resize(iSize * clusterer.nnClusterizerElementSize, (OrtDataType::Float16_t)((float)clusterer.nnClusterizerBoundaryFillValue));
961+
clusterer.inputData16.resize(iSize * (clusterer.nnInternals)->nnClusterizerElementSize, (OrtDataType::Float16_t)((float)(clusterer.nnInternals)->nnClusterizerBoundaryFillValue));
962962
}
963963

964964
auto start0 = std::chrono::high_resolution_clock::now();
965965
runKernel<GPUTPCNNClusterizer, GPUTPCNNClusterizer::fillInputNN>({GetGrid(iSize, lane, GPUReconstruction::krnlDeviceType::CPU), {iSlice}}, evalDtype, 0, batchStart); // Filling the data
966966
auto stop0 = std::chrono::high_resolution_clock::now();
967967

968968
auto start1 = std::chrono::high_resolution_clock::now();
969-
GPUTPCNNClusterizer::applyNetworkClass(clusterer, evalDtype);
969+
GPUTTPCNNClusterizerInference::inferenceNetworkClass(clusterer, evalDtype);
970970
if (clusterer.model_class.getNumOutputNodes()[0][1] == 1){
971971
runKernel<GPUTPCNNClusterizer, GPUTPCNNClusterizer::determineClass1Labels>({GetGrid(iSize, lane, GPUReconstruction::krnlDeviceType::CPU), {iSlice}}, evalDtype, 0, batchStart); // Assigning class labels
972972
} else {
973973
runKernel<GPUTPCNNClusterizer, GPUTPCNNClusterizer::determineClass2Labels>({GetGrid(iSize, lane, GPUReconstruction::krnlDeviceType::CPU), {iSlice}}, evalDtype, 0, batchStart); // Assigning class labels
974974
}
975975

976-
if (!clusterer.nnClusterizerUseCFregression) {
977-
GPUTPCNNClusterizer::applyNetworkReg1(clusterer, evalDtype);
976+
if (!(clusterer.nnInternals)->nnClusterizerUseCfRegression) {
977+
GPUTTPCNNClusterizerInference::inferenceNetworkReg1(clusterer, evalDtype);
978978
runKernel<GPUTPCNNClusterizer, GPUTPCNNClusterizer::publishClass1Regression>({GetGrid(iSize, lane, GPUReconstruction::krnlDeviceType::CPU), {iSlice}}, evalDtype, 0, batchStart); // Running the NN for regression class 1
979979
if (clusterer.model_class.getNumOutputNodes()[0][1] > 1 && reg_model_paths.size() > 1) {
980-
GPUTPCNNClusterizer::applyNetworkReg2(clusterer, evalDtype);
980+
GPUTTPCNNClusterizerInference::inferenceNetworkReg2(clusterer, evalDtype);
981981
runKernel<GPUTPCNNClusterizer, GPUTPCNNClusterizer::publishClass2Regression>({GetGrid(iSize, lane, GPUReconstruction::krnlDeviceType::CPU), {iSlice}}, evalDtype, 0, batchStart); // Running the NN for regression class 2
982982
}
983983
}
@@ -989,17 +989,16 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput)
989989
}
990990

991991
auto start1 = std::chrono::high_resolution_clock::now();
992-
if(clusterer.nnClusterizerUseCFregression) {
992+
if((clusterer.nnInternals)->nnClusterizerUseCfRegression) {
993993
runKernel<GPUTPCNNClusterizer, GPUTPCNNClusterizer::runCfClusterizer>({GetGrid(clusterer.mPmemory->counters.nClusters, lane, GPUReconstruction::krnlDeviceType::CPU), {iSlice}}, evalDtype, 0, 0); // Running the CF regression kernel - no batching needed: batchStart = 0
994994
}
995995
auto stop1 = std::chrono::high_resolution_clock::now();
996996
time_clusterizer += std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1).count() / 1e9;
997997

998-
if (clusterer.nnClusterizerVerbosity < 3) {
998+
if ((clusterer.nnInternals)->nnClusterizerVerbosity < 3) {
999999
LOG(info) << "[NN CF] Apply NN (fragment " << fragment.index << ", lane: " << lane << ", slice: " << iSlice << "): filling data " << time_fill << "s ; clusterizer: " << time_clusterizer << "s ; " << clusterer.mPmemory->counters.nClusters << " clusters --> " << clusterer.mPmemory->counters.nClusters / (time_fill + time_clusterizer) << " clusters/s";
10001000
}
10011001
} else {
1002-
runKernel<GPUTPCCFDeconvolution>({GetGrid(clusterer.mPmemory->counters.nPositions, lane), {iSlice}});
10031002
DoDebugAndDump(RecoStep::TPCClusterFinding, 262144 << 4, clusterer, &GPUTPCClusterFinder::DumpChargeMap, *mDebugFile, "Split Charges");
10041003
runKernel<GPUTPCCFClusterizer>({GetGrid(clusterer.mPmemory->counters.nClusters, lane, GPUReconstruction::krnlDeviceType::CPU), {iSlice}}, 0);
10051004
}

GPU/GPUTracking/TPCClusterFinder/GPUTPCClusterFinder.h

Lines changed: 4 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,7 @@
1919
#include "GPUProcessor.h"
2020
#include "GPUDataTypes.h"
2121
#include "CfFragment.h"
22-
#include "ML/OrtInterface.h"
23-
#include "ML/3rdparty/GPUORTFloat16.h"
22+
#include "GPUTPCNNClusterizerInternals.h"
2423

2524
using namespace o2::ml;
2625

@@ -54,6 +53,8 @@ struct ChargePos;
5453

5554
class GPUTPCGeometry;
5655

56+
class GPUTPCNNClusterizerInternals;
57+
5758
class GPUTPCClusterFinder : public GPUProcessor
5859
{
5960
public:
@@ -145,29 +146,7 @@ class GPUTPCClusterFinder : public GPUProcessor
145146
int16_t mZSOffsetId = -1;
146147
int16_t mOutputId = -1;
147148

148-
int nnClusterizerSizeInputRow = 3;
149-
int nnClusterizerSizeInputPad = 3;
150-
int nnClusterizerSizeInputTime = 3;
151-
int nnClusterizerElementSize = -1;
152-
bool nnClusterizerAddIndexData = true;
153-
float nnClassThreshold = 0.16;
154-
bool nnSigmoidTrafoClassThreshold = 1;
155-
int nnClusterizerUseCFregression = 0;
156-
int nnClusterizerBatchedMode = 1;
157-
int nnClusterizerVerbosity = 0;
158-
int nnClusterizerBoundaryFillValue = -1;
159-
160-
// Memory allocation for neural network
161-
uint class2_elements = 0;
162-
std::vector<float> inputData32;
163-
std::vector<OrtDataType::Float16_t> inputData16;
164-
std::vector<float> outputDataClass, modelProbabilities, outputDataReg1, outputDataReg2;
165-
166-
std::vector<ChargePos> peakPositions;
167-
std::vector<float> centralCharges;
168-
169-
std::unordered_map<std::string, std::string> OrtOptions;
170-
OrtModel model_class, model_reg_1, model_reg_2; // For splitting clusters
149+
GPUTPCNNClusterizerInternals* nnInternals;
171150

172151
#ifndef GPUCA_GPUCODE
173152
void DumpDigits(std::ostream& out);

0 commit comments

Comments
 (0)