Skip to content
This repository was archived by the owner on Jan 26, 2026. It is now read-only.

Commit 19b18db

Browse files
committed
adding Dependence Manager for cleaner handling of deps (input, output and internal)
1 parent f968b8b commit 19b18db

File tree

9 files changed

+189
-163
lines changed

9 files changed

+189
-163
lines changed

src/Creator.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,7 @@ struct DeferredArange : public Deferred
159159
// set_value(std::move(TypeDispatch<x::Creator>(_dtype, _start, _end, _step)));
160160
};
161161

162-
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
162+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::DepManager & dm) override
163163
{
164164
// create start, stop and step
165165
auto start = jit::createI64(loc, builder, _start);
@@ -170,13 +170,13 @@ struct DeferredArange : public Deferred
170170
assert(_dtype == INT64 || _dtype == UINT64); // FIXME
171171
llvm::SmallVector<int64_t> shape(1, -1); //::mlir::ShapedType::kDynamicSize);
172172
auto artype = ::imex::ptensor::PTensorType::get(builder.getContext(), ::mlir::RankedTensorType::get(shape, dtype), true);
173-
auto ar = builder.create<::imex::ptensor::ARangeOp>(loc, artype, start, end, step, true);
174-
auto setter = [this](uint64_t rank, void *allocated, void *aligned, intptr_t offset, const intptr_t * sizes, const intptr_t * strides) {
173+
dm.addVal(guid(),
174+
builder.create<::imex::ptensor::ARangeOp>(loc, artype, start, end, step, true),
175+
[this](uint64_t rank, void *allocated, void *aligned, intptr_t offset, const intptr_t * sizes, const intptr_t * strides) {
175176
assert(rank == 1);
176177
assert(strides[0] == 1);
177178
this->set_value(std::move(mk_tnsr(_dtype, rank, allocated, aligned, offset, sizes, strides)));
178-
};
179-
ivm[_guid] = {ar, setter};
179+
});
180180
return false;
181181
}
182182

src/Deferred.cpp

Lines changed: 7 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -60,25 +60,6 @@ void Runable::fini()
6060
_deferred.clear();
6161
}
6262

63-
#if 0
64-
class DepManager
65-
{
66-
private:
67-
IdValueMap _ivm;
68-
std::unordered_set<id_type> _args;
69-
public:
70-
::mlir::Value getDependent(i::mlir::OpBuilder & builder, d_type guid)
71-
{
72-
if(auto d = _ivm.find(guid); d == _ivm.end()) {
73-
_func.insertArg
74-
_ivm[guid] = {val, {}}
75-
} else {
76-
return d->second.first;
77-
}
78-
}
79-
};
80-
#endif
81-
8263
void process_promises()
8364
{
8465
bool done = false;
@@ -87,7 +68,6 @@ void process_promises()
8768
do {
8869
::mlir::OpBuilder builder(&jit._context);
8970
auto loc = builder.getUnknownLoc();
90-
jit::IdValueMap ivp;
9171

9272
// Create a MLIR module
9373
auto module = builder.create<::mlir::ModuleOp>(loc);
@@ -104,11 +84,13 @@ void process_promises()
10484
// we need to keep runables/deferred/futures alive until we set their values below
10585
std::vector<Runable::ptr_type> runables;
10686

87+
jit::DepManager dm(function);
88+
10789
while(true) {
10890
Runable::ptr_type d;
10991
_deferred.pop(d);
11092
if(d) {
111-
if(d->generate_mlir(builder, loc, ivp)) {
93+
if(d->generate_mlir(builder, loc, dm)) {
11294
d.reset();
11395
break;
11496
};
@@ -123,39 +105,8 @@ void process_promises()
123105

124106
if(runables.empty()) continue;
125107

126-
// Now we have to define the return type as a ValueRange of all arrays which we have created
127-
// (runnables have put them into ivp when generating mlir)
128-
// We also compute the total size of the struct llvm created for this return type
129-
// llvm will basically return a struct with all the arrays as members, each of type JIT::MemRefDescriptor
130-
131-
// Need a container to put all return values, will be used to construct TypeRange
132-
std::vector<::mlir::Type> ret_types;
133-
ret_types.reserve(ivp.size());
134-
std::vector<::mlir::Value> ret_values;
135-
ret_types.reserve(ivp.size());
136-
std::unordered_map<id_type, uint64_t> rank_map;
137-
// here we store the total size of the llvm struct
138-
uint64_t sz = 0;
139-
for(auto & v : ivp) {
140-
auto value = v.second.first;
141-
// append the type and array/value
142-
ret_types.push_back(value.getType());
143-
ret_values.push_back(value);
144-
auto ptt = value.getType().dyn_cast<::imex::ptensor::PTensorType>();
145-
assert(ptt);
146-
auto rank = ptt.getRtensor().getShape().size();
147-
rank_map[v.first] = rank;
148-
// add sizeof(MemRefDescriptor<elementtype, rank>) to sz
149-
sz += 3 + 2 * rank;
150-
}
151-
::mlir::TypeRange ret_tr(ret_types);
152-
::mlir::ValueRange ret_vr(ret_values);
153-
154-
// add return statement
155-
auto ret_value = builder.create<::mlir::func::ReturnOp>(loc, ret_vr);
156-
// Define and assign correct function type
157-
auto funcTypeAttr = ::mlir::TypeAttr::get(builder.getFunctionType({}, ret_tr));
158-
function.setFunctionTypeAttr(funcTypeAttr);
108+
// create return statement and adjust function type
109+
uint64_t sz = dm.handleResult(builder);
159110
// also request generation of c-wrapper function
160111
function->setAttr(::mlir::LLVM::LLVMDialect::getEmitCWrapperAttrName(), ::mlir::UnitAttr::get(&jit._context));
161112
// add the function to the module
@@ -165,22 +116,10 @@ void process_promises()
165116
// compile and run the module
166117
assert(sizeof(intptr_t) == sizeof(void*));
167118
intptr_t * output = new intptr_t[sz];
168-
std::cout << ivp.size() << " sz: " << sz << std::endl;
169119
if(jit.run(module, fname, output)) throw std::runtime_error("failed running jit");
170120

171-
// push results to fulfill promises
172-
size_t pos = 0;
173-
for(auto & v : ivp) {
174-
auto value = v.second.first;
175-
auto rank = rank_map[v.first];
176-
void * allocated = (void*)output[pos];
177-
void * aligned = (void*)output[pos+1];
178-
intptr_t offset = output[pos+2];
179-
intptr_t * sizes = output + pos + 3;
180-
intptr_t * stride = output + pos + 3 + rank;
181-
pos += 3 + 2 * rank;
182-
v.second.second(rank, allocated, aligned, offset, sizes, stride);
183-
}
121+
// push results to deliver promises
122+
dm.deliver(output, sz);
184123
} while(!done);
185124
}
186125

src/EWBinOp.cpp

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -456,15 +456,16 @@ struct DeferredEWBinOp : public Deferred
456456
#endif
457457
}
458458

459-
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
459+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::DepManager & dm) override
460460
{
461461
// FIXME the type of the result is based on a only
462-
auto rtyp = ivm[_a].first.getType();
463-
auto ewbo = builder.create<::imex::ptensor::EWBinOp>(loc, rtyp, builder.getI32IntegerAttr(ddpt2mlir(_op)), ivm[_a].first, ivm[_b].first);
464-
auto setter = [this](uint64_t rank, void *allocated, void *aligned, intptr_t offset, const intptr_t * sizes, const intptr_t * strides) {
462+
auto a = dm.getDependent(builder, _a);
463+
auto b = dm.getDependent(builder, _b);
464+
dm.addVal(guid(),
465+
builder.create<::imex::ptensor::EWBinOp>(loc, a.getType(), builder.getI32IntegerAttr(ddpt2mlir(_op)), a, b),
466+
[this](uint64_t rank, void *allocated, void *aligned, intptr_t offset, const intptr_t * sizes, const intptr_t * strides) {
465467
this->set_value(std::move(mk_tnsr(_dtype, rank, allocated, aligned, offset, sizes, strides)));
466-
};
467-
ivm[_guid] = {ewbo, setter};
468+
});
468469
return false;
469470
}
470471

src/ReduceOp.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -116,10 +116,10 @@ struct DeferredReduceOp : public Deferred
116116
#endif
117117
}
118118

119-
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
119+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::DepManager & dm) override
120120
{
121121
// FIXME reduction over individual dimensions is not supported
122-
auto a = ivm[_a].first;
122+
auto a = dm.getDependent(builder, _a);
123123
auto a_ptt = a.getType().dyn_cast<::imex::ptensor::PTensorType>();
124124
assert(a_ptt);
125125

@@ -128,11 +128,11 @@ struct DeferredReduceOp : public Deferred
128128
::mlir::RankedTensorType::get(llvm::SmallVector<int64_t>(), a_ptt.getRtensor().getElementType()),
129129
true
130130
);
131-
auto rop = builder.create<::imex::ptensor::ReductionOp>(loc, rtyp, builder.getI32IntegerAttr(ddpt2mlir(_op)), a);
132-
auto setter = [this](uint64_t rank, void *allocated, void *aligned, intptr_t offset, const intptr_t * sizes, const intptr_t * strides) {
131+
dm.addVal(guid(),
132+
builder.create<::imex::ptensor::ReductionOp>(loc, rtyp, builder.getI32IntegerAttr(ddpt2mlir(_op)), a),
133+
[this](uint64_t rank, void *allocated, void *aligned, intptr_t offset, const intptr_t * sizes, const intptr_t * strides) {
133134
this->set_value(std::move(mk_tnsr(_dtype, rank, allocated, aligned, offset, sizes, strides)));
134-
};
135-
ivm[_guid] = {rop, setter};
135+
});
136136
return false;
137137
}
138138

src/Service.cpp

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -68,14 +68,12 @@ struct DeferredService : public Deferred
6868
#endif
6969
}
7070

71-
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
71+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::DepManager & dm) override
7272
{
7373
switch(_op) {
7474
case DROP:
75-
if(auto e = ivm.find(_a); e != ivm.end()) {
76-
ivm.erase(e);
77-
// FIXME create delete op and return it
78-
}
75+
dm.drop(_a);
76+
// FIXME create delete op and return it
7977
break;
8078
case RUN:
8179
return true;

src/ddptensor.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -137,8 +137,7 @@ PYBIND11_MODULE(_ddptensor, m) {
137137
.def("_get_slice", &GetItem::get_slice)
138138
.def("_get_local", &GetItem::get_local)
139139
.def("_gather", &GetItem::gather)
140-
.def("to_numpy", &IO::to_numpy)
141-
.def("ttt", &jit::ttt);
140+
.def("to_numpy", &IO::to_numpy);
142141

143142
py::class_<Creator>(m, "Creator")
144143
.def("create_from_shape", &Creator::create_from_shape)

src/include/ddptensor/Deferred.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ struct Runable
1717
virtual void run() = 0;
1818
/// generate MLIR code for jit
1919
/// @return true if last operation in to-be-compiled region, false otherwise
20-
virtual bool generate_mlir(::mlir::OpBuilder &, ::mlir::Location, jit::IdValueMap &)
20+
virtual bool generate_mlir(::mlir::OpBuilder &, ::mlir::Location, jit::DepManager &)
2121
{
2222
throw(std::runtime_error("No MLIR support for this operation."));
2323
return false;
@@ -145,7 +145,7 @@ struct DeferredLambda : public Runable
145145
_l();
146146
}
147147

148-
bool generate_mlir(::mlir::OpBuilder &, ::mlir::Location, jit::IdValueMap &)
148+
bool generate_mlir(::mlir::OpBuilder &, ::mlir::Location, jit::DepManager &)
149149
{
150150
return _l();
151151
}

src/include/ddptensor/jit/mlir.hpp

Lines changed: 37 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,26 +8,60 @@
88
#include <mlir/IR/MLIRContext.h>
99
#include <mlir/Pass/PassManager.h>
1010
#include <mlir/IR/Builders.h>
11+
#include <mlir/Dialect/Func/IR/FuncOps.h>
1112

1213
#include <unordered_map>
1314
#include <functional>
1415
#include <utility>
16+
#include <vector>
1517

1618
namespace jit {
1719

1820
// function type for building body for linalg::generic
1921
using SetResFunc = std::function<void(
2022
uint64_t rank, void *allocated, void *aligned, intptr_t offset, const intptr_t * sizes, const intptr_t * strides)>;
21-
using IdValueMap = std::unordered_map<id_type, std::pair<::mlir::Value, SetResFunc>>;
2223

2324
// initialize jit
2425
void init();
2526

26-
void ttt();
27-
2827
// create a constant integer with given value
2928
extern ::mlir::Value createI64(const ::mlir::Location & loc, ::mlir::OpBuilder & builder, int64_t val);
3029

30+
/// Manages input/output (tensor) dependencies
31+
class DepManager
32+
{
33+
private:
34+
using IdValueMap = std::unordered_map<id_type, std::pair<::mlir::Value, SetResFunc>>;
35+
using IdRankMap = std::unordered_map<id_type, int>;
36+
::mlir::func::FuncOp & _func; // MLIR function to which ops are added
37+
IdValueMap _ivm; // guid -> {mlir::Value, deliver-callback}
38+
IdRankMap _irm; // guid -> rank as computed in MLIR
39+
std::vector<id_type> _args; // input args to generated function
40+
41+
public:
42+
DepManager(::mlir::func::FuncOp & f)
43+
: _func(f)
44+
{}
45+
/// @return the ::mlir::Value representing the tensor with guid guid
46+
/// If the tensor is not created within the current function, it will
47+
/// be added as a function argument.
48+
::mlir::Value getDependent(::mlir::OpBuilder & builder, id_type guid);
49+
50+
/// for the tensor guid register the ::mlir::value and a callback to deliver the promise which generated the value
51+
/// if the tensor is alive when the function returns it will be added to the list of results
52+
void addVal(id_type guid, ::mlir::Value val, SetResFunc cb);
53+
54+
/// signals end of lifetime of given tensor: does not need to be returned
55+
void drop(id_type guid);
56+
57+
/// create return statement and add results to function
58+
/// @return size of output in number of intptr_t's
59+
uint64_t handleResult(::mlir::OpBuilder & builder);
60+
61+
/// deliver promise after execution
62+
void deliver(intptr_t *, uint64_t);
63+
};
64+
3165
// A class to manage the MLIR business (compilation and execution).
3266
// Just a stub for now, will need to be extended with paramters and maybe more.
3367
class JIT {

0 commit comments

Comments
 (0)