@@ -26,11 +26,7 @@ def MakeKerasPermute(layer):
    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
        if len(fAttributePermute) > 0:
            fAttributePermute = [0] + fAttributePermute  # for the batch dimension from the input
-           op = SOFIE.ROperator_Transpose("float")(
-               fAttributePermute, fLayerInputName, fLayerOutputName
-           )  # SOFIE.fPermuteDims
-       else:
-           op = SOFIE.ROperator_Transpose("float")(fLayerInputName, fLayerOutputName)
+       op = SOFIE.ROperator_Transpose(fAttributePermute, fLayerInputName, fLayerOutputName)
        return op
    else:
        raise RuntimeError(
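A note on the change above: Keras `Permute` axes cover only the non-batch dimensions, so the converter prepends axis 0 before passing the list to `ROperator_Transpose`, whose Python binding now takes the permutation directly instead of being instantiated as `ROperator_Transpose("float")`. A minimal C++ sketch of that batch-axis adjustment; `MakeFullPermutation` is a hypothetical helper, not SOFIE API:

```cpp
#include <cstddef>
#include <vector>

// Prepend the implicit batch axis to a Keras Permute axis list.
// Keras Permute((2, 1)) on a (batch, H, W) tensor means "swap H and W",
// so the full permutation becomes [0, 2, 1].
std::vector<std::size_t> MakeFullPermutation(const std::vector<std::size_t> &kerasPerm) {
   std::vector<std::size_t> perm{0};
   perm.insert(perm.end(), kerasPerm.begin(), kerasPerm.end());
   return perm;
}
```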
@@ -161,7 +161,7 @@ def move_operator(op):
    fLayerOutput = outputs[0]
    if fLayerType == "GlobalAveragePooling2D":
        if layer_data["channels_last"]:
-           op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
+           op = SOFIE.ROperator_Transpose([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
            rmodel.AddOperator(move_operator(op))
            inputs[0] = LayerName + "PreTrans"
            outputs[0] = LayerName + "Squeeze"
@@ -186,23 +186,23 @@ def move_operator(op):
        fAttrPerm = list(range(0, num_input_shapes))
        fAttrPerm[1] = axis
        fAttrPerm[axis] = 1
-       op = SOFIE.ROperator_Transpose("float")(fAttrPerm, inputs[0], LayerName + "PreTrans")
+       op = SOFIE.ROperator_Transpose(fAttrPerm, inputs[0], LayerName + "PreTrans")
        rmodel.AddOperator(move_operator(op))
        inputs[0] = LayerName + "PreTrans"
        outputs[0] = LayerName + "PostTrans"
        rmodel.AddOperator(move_operator(mapKerasLayer[fLayerType](layer_data)))
-       op = SOFIE.ROperator_Transpose("float")(fAttrPerm, LayerName + "PostTrans", fLayerOutput)
+       op = SOFIE.ROperator_Transpose(fAttrPerm, LayerName + "PostTrans", fLayerOutput)
        rmodel.AddOperator(move_operator(op))

    elif fLayerType == "MaxPooling2D" or fLayerType == "AveragePooling2D":
        if layer_data["channels_last"]:
-           op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
+           op = SOFIE.ROperator_Transpose([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
            rmodel.AddOperator(move_operator(op))
            inputs[0] = LayerName + "PreTrans"
            outputs[0] = LayerName + "PostTrans"
        rmodel.AddOperator(move_operator(mapKerasLayer[fLayerType](layer_data)))
        if layer_data["channels_last"]:
-           op = SOFIE.ROperator_Transpose("float")([0, 2, 3, 1], LayerName + "PostTrans", fLayerOutput)
+           op = SOFIE.ROperator_Transpose([0, 2, 3, 1], LayerName + "PostTrans", fLayerOutput)
            rmodel.AddOperator(move_operator(op))

    else:
@@ -237,7 +237,7 @@ def move_operator(op):
    # if the data format is channels last (can be set to channels first by the user).
    if fLayerType == "Conv2D":
        if layer_data["channels_last"]:
-           op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
+           op = SOFIE.ROperator_Transpose([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
            rmodel.AddOperator(move_operator(op))
            inputs[0] = LayerName + "PreTrans"
            layer_data["layerInput"] = inputs
@@ -248,7 +248,7 @@ def move_operator(op):
    Activation_layer_input = LayerName + fLayerType
    if fLayerType == "Conv2D":
        if layer_data["channels_last"]:
-           op = SOFIE.ROperator_Transpose("float")(
+           op = SOFIE.ROperator_Transpose(
                [0, 2, 3, 1], LayerName + fLayerType, LayerName + "PostTrans"
            )
            rmodel.AddOperator(move_operator(op))
@@ -268,15 +268,15 @@ def move_operator(op):
outputs = layer_data["layerOutput"]
fLayerOutput = outputs[0]
if layer_data["channels_last"]:
op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
op = SOFIE.ROperator_Transpose([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
rmodel.AddOperator(move_operator(op))
inputs[0] = LayerName + "PreTrans"
layer_data["layerInput"] = inputs
outputs[0] = LayerName + "PostTrans"
rmodel.AddOperator(move_operator(mapKerasLayerWithActivation[fLayerType](layer_data)))
if fLayerType == "Conv2D":
if layer_data["channels_last"]:
op = SOFIE.ROperator_Transpose("float")([0, 2, 3, 1], LayerName + "PostTrans", fLayerOutput)
op = SOFIE.ROperator_Transpose([0, 2, 3, 1], LayerName + "PostTrans", fLayerOutput)
rmodel.AddOperator(move_operator(op))
return rmodel
else:
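Throughout this file the pattern is the same: when the Keras layer runs in channels_last (NHWC) layout, the emitted graph brackets the SOFIE operator, which works in NCHW, with a `[0, 3, 1, 2]` pre-transpose and a `[0, 2, 3, 1]` post-transpose. A self-contained sketch of the index remapping the pre-transpose performs; `ToNCHW` is a hypothetical helper for illustration only:

```cpp
#include <cstddef>
#include <vector>

// Rearrange an NHWC buffer into NCHW order, i.e. apply permutation [0, 3, 1, 2].
std::vector<float> ToNCHW(const std::vector<float> &nhwc,
                          std::size_t N, std::size_t H, std::size_t W, std::size_t C) {
   std::vector<float> nchw(N * C * H * W);
   for (std::size_t n = 0; n < N; ++n)
      for (std::size_t h = 0; h < H; ++h)
         for (std::size_t w = 0; w < W; ++w)
            for (std::size_t c = 0; c < C; ++c)
               nchw[((n * C + c) * H + h) * W + w] = nhwc[((n * H + h) * W + w) * C + c];
   return nchw;
}
```

The `[0, 2, 3, 1]` post-transpose is the inverse mapping, restoring the layout the downstream Keras layers expect.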
3 changes: 3 additions & 0 deletions tmva/sofie/CMakeLists.txt
@@ -65,10 +65,13 @@ ROOT_STANDARD_LIBRARY_PACKAGE(ROOTTMVASofie
      TMVA/ROperator_Einsum.hxx
      TMVA/ROperator_Random.hxx
      TMVA/ROperator_ScatterElements.hxx
+     TMVA/ROperator_ScatterND.hxx
      TMVA/ROperator_Gather.hxx
+     TMVA/ROperator_GatherND.hxx
      TMVA/ROperator_NonZero.hxx
      TMVA/ROperator_Not.hxx
      TMVA/ROperator_Clip.hxx
+     TMVA/ROperator_Gelu.hxx
      TMVA/SOFIE_common.hxx
      TMVA/SOFIEHelpers.hxx

6 changes: 6 additions & 0 deletions tmva/sofie/inc/TMVA/RModel.hxx
@@ -14,6 +14,7 @@ class RModel final : public RModel_Base {
private:
   bool fIsInitialized = false;
   bool fIsSubGraph = false;
+  bool fUseVDT = false;
   int fVerbose = 0;
   int fBatchSize = -1;
   long fReadPos = 0; // reading file position
@@ -230,6 +231,11 @@ public:
   void HeadInitializedTensors(std::string name, int n_print = 50);

   bool UseSession() const { return fUseSession; }
+  // flag to use VDT for fast math functions (e.g. exp in softmax)
+  void SetUseVDT(bool on) {
+     fUseVDT = on;
+  }
+  bool UseVDT() const { return fUseVDT; }

   // Use the ClassDef macro to allow definition of custom streaming
   ClassDefNV(RModel, 3);
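For context, a usage sketch of the new flag; the parser entry point and output calls are the standard SOFIE ones, but treat the exact sequence and file names as illustrative:

```cpp
#include "TMVA/RModel.hxx"
#include "TMVA/RModelParser_ONNX.hxx"

void GenerateWithVDT() {
   using namespace TMVA::Experimental;
   SOFIE::RModelParser_ONNX parser;
   SOFIE::RModel model = parser.Parse("model.onnx"); // illustrative model file
   model.SetUseVDT(true); // emitted code may then use vdt fast-math (e.g. for exp in softmax)
   model.Generate();
   model.OutputGenerated("model.hxx");
}
```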
59 changes: 55 additions & 4 deletions tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx
@@ -140,6 +140,7 @@ public:
      auto ret = UTILITY::MultidirectionalBroadcastShape(fShapeA, fShapeB);
      fBroadcastFlag = ret.first;
      fShapeY = ret.second;
+     auto lengthY = ConvertShapeToLength(fShapeY);
      if (model.IsConstantTensor(fNA) && model.IsConstantTensor(fNB)) {
         bool broadcast = fBroadcastFlag > 0;
         if (broadcast) {
@@ -193,7 +194,7 @@ public:
      const std::string &nameB = fNBroadcastedB.empty() ? fNB : fNBroadcastedB;
      auto dataA = static_cast<T *>(model.GetInitializedTensorData(nameA).get());
      auto dataB = static_cast<T *>(model.GetInitializedTensorData(nameB).get());
-     std::vector<T> dataY(ConvertShapeToLength(fShapeY));
+     std::vector<T> dataY(lengthY);
      for (size_t i = 0; i < dataY.size(); i++) {
         dataY[i] = BinaryOperatorTrait<T, Op>::Func(dataA[i], dataB[i]);
      }
@@ -207,6 +208,59 @@ public:
<< " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
<< ConvertShapeToString(fShapeY) << " : " << ConvertValuesToString(dataY) << std::endl;
}
} else if (((model.IsShapeTensor(fNA) && model.IsShapeTensor(fNB)) ||
(model.IsShapeTensor(fNA) && model.IsConstantTensor(fNB)) ||
(model.IsShapeTensor(fNB) && model.IsConstantTensor(fNA)))
&& (fShapeA.size() <=1 && fShapeB.size() <=1 && model.GetTensorType(fNA) == ETensorType::INT64)) {
// case of shape tensors ( tensors are of rank 0 or 1 )
std::vector<Dim> dimValA;
std::vector<Dim> dimValB;
if (model.IsShapeTensor(fNA))
dimValA = model.GetShapeTensorValues(fNA);
if (model.IsShapeTensor(fNB))
dimValB = model.GetShapeTensorValues(fNB);
// adjust for broadcasting - repet values until it reaches shapes of Y
if (!fShapeY.empty() && fShapeY[0] > 1) {
if (dimValA.size() == 1) dimValA = std::vector<Dim>( fShapeY[0], dimValA[0]);
if (dimValB.size() == 1) dimValB = std::vector<Dim>( fShapeY[0], dimValB[0]);
}

auto convertDataToDim = [&](const std::string & name, const std::vector<size_t> & shape, std::vector<Dim> & dimValues) {
auto data = static_cast<int64_t *>(model.GetInitializedTensorData(name).get());
dimValues.resize(lengthY);
for (size_t i = 0; i < lengthY; i++) {
if (!shape.empty() && lengthY == shape[0])
dimValues[i] = Dim{ static_cast<size_t>(data[i])};
else // case dataA is a scalar
dimValues[i] = Dim{ static_cast<size_t>(data[0])};
}
};
if (model.IsConstantTensor(fNA)) {
convertDataToDim(fNA,fShapeA,dimValA);
} else if (model.IsConstantTensor(fNB)) {
convertDataToDim(fNB,fShapeB,dimValB);
}

//perform binary operations on shape tensors
std::vector<Dim> dimValY(lengthY);
for (size_t i = 0; i < lengthY; i++) {
if (!dimValA[i].isParam && !dimValB[i].isParam) {
size_t d = BinaryOperatorTrait<size_t, Op>::Func(dimValA[i].dim, dimValB[i].dim);
dimValY[i] = Dim{d};
} else {
auto res = BinaryOperatorTrait<T, Op>::Op(dimValA[i].GetVal(), dimValB[i].GetVal());
dimValY[i] = Dim{res, static_cast<size_t>(-1)};
}
}
model.AddShapeTensor(fNY,dimValY, fShapeY.empty()); // cannot be a scalar
if (model.Verbose()) {
std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
<< " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
<< ConvertShapeToString(fShapeY) << " : " << ConvertDimShapeToString(dimValY) << " (shape)" << std::endl;
}
// no code needs to be generated (flag this as a constant output tensor)
fIsOutputConstant = true;

} else {
// case of defined and non-constant tensors
model.AddIntermediateTensor(fNY, model.GetTensorType(fNA), fShapeY);
@@ -279,9 +333,6 @@ public:

      opName = "op_" + opName;

-     if (fDimShapeY.empty()) {
-        throw std::runtime_error("TMVA SOFIE Binary Op called to Generate without being initialized first");
-     }
      std::stringstream out;
      out << SP << "\n//------ " << opName << " " << BinaryOperatorTrait<T, Op>::Name() << " --> "
          << ConvertDimShapeToString(fDimShapeY) << "\n";
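The new branch above folds binary operations over shape tensors at generation time instead of emitting kernel code: concrete dimension pairs are computed numerically, while pairs involving a symbolic dimension become a string expression. A simplified, self-contained sketch of that `isParam` split; SOFIE's real `Dim` struct differs, this only mirrors the logic:

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Dim {
   bool isParam = false;
   std::size_t dim = 0;
   std::string param; // symbolic name, used when isParam is true
   std::string GetVal() const { return isParam ? param : std::to_string(dim); }
};

int main() {
   std::vector<Dim> a{{true, 0, "N"}, {false, 3, ""}};  // shape tensor [N, 3]
   std::vector<Dim> b{{false, 2, ""}, {false, 2, ""}};  // constant tensor [2, 2]
   for (std::size_t i = 0; i < a.size(); ++i) {
      if (!a[i].isParam && !b[i].isParam)
         std::cout << a[i].dim * b[i].dim << "\n";                             // prints 6
      else
         std::cout << "(" << a[i].GetVal() << " * " << b[i].GetVal() << ")\n"; // prints (N * 2)
   }
   return 0;
}
```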
22 changes: 16 additions & 6 deletions tmva/sofie/inc/TMVA/ROperator_BasicUnary.hxx
@@ -9,7 +9,7 @@ namespace TMVA {
namespace Experimental {
namespace SOFIE {

-enum class EBasicUnaryOperator { kReciprocal, kSqrt , kNeg, kExp, kLog, kSin, kCos, kAbs, kSoftplus };
+enum class EBasicUnaryOperator { kReciprocal, kSqrt , kNeg, kExp, kLog, kSin, kCos, kAbs, kSoftplus, kAtan, kFloor };

template <typename T, EBasicUnaryOperator Op>
struct UnaryOpTraits {
@@ -69,6 +69,18 @@ struct UnaryOpTraits<T, EBasicUnaryOperator::kSoftplus> {
static std::string Op(const std::string &X) { return "std::log(std::exp(" + X + ") + 1)"; }
};

template <typename T>
struct UnaryOpTraits<T, EBasicUnaryOperator::kAtan> {
static std::string Name() { return "Atan"; }
static std::string Op(const std::string &X) { return "std::atan(" + X + ")"; }
};

template <typename T>
struct UnaryOpTraits<T, EBasicUnaryOperator::kFloor> {
static std::string Name() { return "Floor"; }
static std::string Op(const std::string &X) { return "std::floor(" + X + ")"; }
};

template <typename T, EBasicUnaryOperator Op>
class ROperator_BasicUnary final : public ROperator {
private:
@@ -99,6 +111,8 @@ public:
      fShapeX = model.GetDimTensorShape(fNX);
      fShapeY = fShapeX;
      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
+
+     model.AddNeededStdLib("cmath");
}

std::string Generate(std::string OpName) override
@@ -115,11 +129,7 @@ public:
}

std::vector<std::string> GetStdLibs() override {
if (Op == EBasicUnaryOperator::kSqrt || Op == EBasicUnaryOperator::kExp || Op == EBasicUnaryOperator::kLog) {
return { std::string("cmath") };
} else {
return {};
}
return { std::string("cmath") };
}
};

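With the traits above, the kernels generated for the two new operators reduce to elementwise loops over the input tensor. A sketch of the code `Generate()` emits; the free-function wrappers and tensor names are illustrative:

```cpp
#include <cmath>

void op_atan(const float *tensor_X, float *tensor_Y, int n) {
   for (int id = 0; id < n; id++)
      tensor_Y[id] = std::atan(tensor_X[id]); // string built by UnaryOpTraits<T, kAtan>::Op
}

void op_floor(const float *tensor_X, float *tensor_Y, int n) {
   for (int id = 0; id < n; id++)
      tensor_Y[id] = std::floor(tensor_X[id]); // string built by UnaryOpTraits<T, kFloor>::Op
}
```

Since every operator in this family now emits a `std::` math call, unconditionally returning `cmath` from `GetStdLibs()` (and registering it in `Initialize()`) is simpler than listing the operators case by case.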
19 changes: 10 additions & 9 deletions tmva/sofie/inc/TMVA/ROperator_Cast.hxx
@@ -20,13 +20,14 @@ private:
   std::string fNX;
   std::string fNY;
   std::vector<Dim> fShape;
-  std::string fAttrType = "float";
+  ETensorType fType;

public:
   ROperator_Cast(){}
-  ROperator_Cast(std::string attr_type,std::string nameX, std::string nameY):
-     fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)),
-     fAttrType(attr_type) {
+  ROperator_Cast(ETensorType type, std::string nameX, std::string nameY):
+     fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)),
+     fType(type)
+  {
      fInputTensorNames = { fNX };
      fOutputTensorNames = { fNY };
   }
@@ -51,21 +52,21 @@ public:
      if (model.IsInitializedTensor(fNX)) {
         fIsOutputConstant = true;
         auto inputData = model.GetInitializedTensorData(fNX);
-        if (ConvertStringToType(fAttrType) == ETensorType::INT64) {
+        if (fType == ETensorType::INT64) {
            model.AddConstantTensor<int64_t>(fNY, ConvertShapeToInt(fShape), static_cast<int64_t*>(inputData.get()));
            model.SetNotWritableInitializedTensor(fNX);
         }
         else
            fIsOutputConstant = false;
-     } else if (model.IsShapeTensor(fNX) && ConvertStringToType(fAttrType) == ETensorType::INT64) {
+     } else if (model.IsShapeTensor(fNX) && fType == ETensorType::INT64) {
         auto shapeData = model.GetShapeTensorValues(fNX);
         model.AddShapeTensor(fNY, shapeData, fShape.size() == 0);
         fIsOutputConstant = true;
      }
      if (!fIsOutputConstant)
-        model.AddIntermediateTensor(fNY, ConvertStringToType(fAttrType), fShape);
+        model.AddIntermediateTensor(fNY, fType, fShape);
      if (model.Verbose()) {
-        std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << fAttrType << " for " << fNY
+        std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << ConvertTypeToString(fType) << " for " << fNY
                   << " shape " << ConvertDimShapeToString(fShape);
         if (fIsOutputConstant) std::cout << " (constant) ";
         std::cout << std::endl;
@@ -86,7 +87,7 @@

out << SP << "for (int id = 0; id < " << length << " ; id++){\n";

out << SP << SP << "tensor_" << fNY << "[id] = static_cast<"<< fAttrType << ">(tensor_" << fNX << "[id]);\n";
out << SP << SP << "tensor_" << fNY << "[id] = static_cast<"<< ConvertTypeToString(fType) << ">(tensor_" << fNX << "[id]);\n";

out << SP << "}\n";
return out.str();
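The generated code is unchanged in shape; only the target type now comes from `ConvertTypeToString(fType)` rather than the raw attribute string. A sketch of the loop this `Generate()` produces for a float-to-int64 cast; the wrapper function and tensor names are illustrative:

```cpp
#include <cstdint>

void op_cast(const float *tensor_X, int64_t *tensor_Y, int length) {
   for (int id = 0; id < length; id++) {
      tensor_Y[id] = static_cast<int64_t>(tensor_X[id]);
   }
}
```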