Skip to content
This repository was archived by the owner on Jan 26, 2026. It is now read-only.

Commit f968b8b

Browse files
committed
adding compile&run trigger Service::run()
1 parent deef030 commit f968b8b

File tree

12 files changed

+152
-117
lines changed

12 files changed

+152
-117
lines changed

src/Creator.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,7 @@ struct DeferredArange : public Deferred
159159
// set_value(std::move(TypeDispatch<x::Creator>(_dtype, _start, _end, _step)));
160160
};
161161

162-
::mlir::Value generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
162+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
163163
{
164164
// create start, stop and step
165165
auto start = jit::createI64(loc, builder, _start);
@@ -177,7 +177,7 @@ struct DeferredArange : public Deferred
177177
this->set_value(std::move(mk_tnsr(_dtype, rank, allocated, aligned, offset, sizes, strides)));
178178
};
179179
ivm[_guid] = {ar, setter};
180-
return ar;
180+
return false;
181181
}
182182

183183
FactoryId factory() const

src/Deferred.cpp

Lines changed: 97 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -81,99 +81,107 @@ class DepManager
8181

8282
void process_promises()
8383
{
84+
bool done = false;
8485
jit::JIT jit;
85-
::mlir::OpBuilder builder(&jit._context);
86-
auto loc = builder.getUnknownLoc();
87-
jit::IdValueMap ivp;
88-
89-
// Create a MLIR module
90-
auto module = builder.create<::mlir::ModuleOp>(loc);
91-
// Create a func
92-
auto dtype = builder.getI64Type();
93-
// create dummy type, we'll replace it with the actual type later
94-
auto dummyFuncType = builder.getFunctionType({}, dtype);
95-
std::string fname("ddpt_jit");
96-
auto function = builder.create<::mlir::func::FuncOp>(loc, fname, dummyFuncType);
97-
// create function entry block
98-
auto &entryBlock = *function.addEntryBlock();
99-
// Set the insertion point in the builder to the beginning of the function body
100-
builder.setInsertionPointToStart(&entryBlock);
101-
// we need to keep runables/deferred/futures alive until we set their values below
102-
std::vector<Runable::ptr_type> runables;
103-
104-
while(true) {
105-
Runable::ptr_type d;
106-
_deferred.pop(d);
107-
if(d) {
108-
// d->run();
109-
(void) d->generate_mlir(builder, loc, ivp);
110-
// keep alive for later set_value
111-
runables.push_back(std::move(d));
112-
//d.reset();
113-
} else {
114-
break;
115-
}
116-
}
11786

118-
// Now we have to define the return type as a ValueRange of all arrays which we have created
119-
// (runnables have put them into ivp)
120-
// We also compute the total size of the struct llvm created for this return type
121-
// llvm will basically return a struct with all the arrays as members, each of type JIT::MemRefDescriptor
122-
123-
// Need a container to put all return values, will be used to construct TypeRange
124-
std::vector<::mlir::Type> ret_types;
125-
ret_types.reserve(ivp.size());
126-
std::vector<::mlir::Value> ret_values;
127-
ret_types.reserve(ivp.size());
128-
std::unordered_map<id_type, uint64_t> rank_map;
129-
// here we store the total size of the llvm struct
130-
uint64_t sz = 0;
131-
for(auto & v : ivp) {
132-
auto value = v.second.first;
133-
// append the type and array/value
134-
ret_types.push_back(value.getType());
135-
ret_values.push_back(value);
136-
auto ptt = value.getType().dyn_cast<::imex::ptensor::PTensorType>();
137-
assert(ptt);
138-
auto rank = ptt.getRtensor().getShape().size();
139-
rank_map[v.first] = rank;
140-
// add sizeof(MemRefDescriptor<elementtype, rank>) to sz
141-
sz += 3 + 2 * rank;
142-
}
143-
::mlir::TypeRange ret_tr(ret_types);
144-
::mlir::ValueRange ret_vr(ret_values);
145-
146-
// add return statement
147-
auto ret_value = builder.create<::mlir::func::ReturnOp>(loc, ret_vr);
148-
// Define and assign correct function type
149-
auto funcTypeAttr = ::mlir::TypeAttr::get(builder.getFunctionType({}, ret_tr));
150-
function.setFunctionTypeAttr(funcTypeAttr);
151-
// also request generation of c-wrapper function
152-
function->setAttr(::mlir::LLVM::LLVMDialect::getEmitCWrapperAttrName(), ::mlir::UnitAttr::get(&jit._context));
153-
// add the function to the module
154-
module.push_back(function);
155-
module.dump();
156-
// finally compile and run the module
157-
assert(sizeof(intptr_t) == sizeof(void*));
158-
intptr_t * output = new intptr_t[sz];
159-
std::cout << ivp.size() << " sz: " << sz << std::endl;
160-
if(jit.run(module, fname, output)) throw std::runtime_error("failed running jit");
87+
do {
88+
::mlir::OpBuilder builder(&jit._context);
89+
auto loc = builder.getUnknownLoc();
90+
jit::IdValueMap ivp;
16191

162-
size_t pos = 0;
163-
for(auto & v : ivp) {
164-
auto value = v.second.first;
165-
auto rank = rank_map[v.first];
166-
void * allocated = (void*)output[pos];
167-
void * aligned = (void*)output[pos+1];
168-
intptr_t offset = output[pos+2];
169-
intptr_t * sizes = output + pos + 3;
170-
intptr_t * stride = output + pos + 3 + rank;
171-
pos += 3 + 2 * rank;
172-
v.second.second(rank, allocated, aligned, offset, sizes, stride);
173-
}
92+
// Create a MLIR module
93+
auto module = builder.create<::mlir::ModuleOp>(loc);
94+
// Create a func
95+
auto dtype = builder.getI64Type();
96+
// create dummy type, we'll replace it with the actual type later
97+
auto dummyFuncType = builder.getFunctionType({}, dtype);
98+
std::string fname("ddpt_jit");
99+
auto function = builder.create<::mlir::func::FuncOp>(loc, fname, dummyFuncType);
100+
// create function entry block
101+
auto &entryBlock = *function.addEntryBlock();
102+
// Set the insertion point in the builder to the beginning of the function body
103+
builder.setInsertionPointToStart(&entryBlock);
104+
// we need to keep runables/deferred/futures alive until we set their values below
105+
std::vector<Runable::ptr_type> runables;
106+
107+
while(true) {
108+
Runable::ptr_type d;
109+
_deferred.pop(d);
110+
if(d) {
111+
if(d->generate_mlir(builder, loc, ivp)) {
112+
d.reset();
113+
break;
114+
};
115+
// keep alive for later set_value
116+
runables.push_back(std::move(d));
117+
} else {
118+
// signals system shutdown
119+
done = true;
120+
break;
121+
}
122+
}
174123

175-
// finally release all our runables/tasks/deferred/futures
176-
runables.clear();
124+
if(runables.empty()) continue;
125+
126+
// Now we have to define the return type as a ValueRange of all arrays which we have created
127+
// (runnables have put them into ivp when generating mlir)
128+
// We also compute the total size of the struct llvm created for this return type
129+
// llvm will basically return a struct with all the arrays as members, each of type JIT::MemRefDescriptor
130+
131+
// Need a container to put all return values, will be used to construct TypeRange
132+
std::vector<::mlir::Type> ret_types;
133+
ret_types.reserve(ivp.size());
134+
std::vector<::mlir::Value> ret_values;
135+
ret_types.reserve(ivp.size());
136+
std::unordered_map<id_type, uint64_t> rank_map;
137+
// here we store the total size of the llvm struct
138+
uint64_t sz = 0;
139+
for(auto & v : ivp) {
140+
auto value = v.second.first;
141+
// append the type and array/value
142+
ret_types.push_back(value.getType());
143+
ret_values.push_back(value);
144+
auto ptt = value.getType().dyn_cast<::imex::ptensor::PTensorType>();
145+
assert(ptt);
146+
auto rank = ptt.getRtensor().getShape().size();
147+
rank_map[v.first] = rank;
148+
// add sizeof(MemRefDescriptor<elementtype, rank>) to sz
149+
sz += 3 + 2 * rank;
150+
}
151+
::mlir::TypeRange ret_tr(ret_types);
152+
::mlir::ValueRange ret_vr(ret_values);
153+
154+
// add return statement
155+
auto ret_value = builder.create<::mlir::func::ReturnOp>(loc, ret_vr);
156+
// Define and assign correct function type
157+
auto funcTypeAttr = ::mlir::TypeAttr::get(builder.getFunctionType({}, ret_tr));
158+
function.setFunctionTypeAttr(funcTypeAttr);
159+
// also request generation of c-wrapper function
160+
function->setAttr(::mlir::LLVM::LLVMDialect::getEmitCWrapperAttrName(), ::mlir::UnitAttr::get(&jit._context));
161+
// add the function to the module
162+
module.push_back(function);
163+
module.dump();
164+
165+
// compile and run the module
166+
assert(sizeof(intptr_t) == sizeof(void*));
167+
intptr_t * output = new intptr_t[sz];
168+
std::cout << ivp.size() << " sz: " << sz << std::endl;
169+
if(jit.run(module, fname, output)) throw std::runtime_error("failed running jit");
170+
171+
// push results to fulfill promises
172+
size_t pos = 0;
173+
for(auto & v : ivp) {
174+
auto value = v.second.first;
175+
auto rank = rank_map[v.first];
176+
void * allocated = (void*)output[pos];
177+
void * aligned = (void*)output[pos+1];
178+
intptr_t offset = output[pos+2];
179+
intptr_t * sizes = output + pos + 3;
180+
intptr_t * stride = output + pos + 3 + rank;
181+
pos += 3 + 2 * rank;
182+
v.second.second(rank, allocated, aligned, offset, sizes, stride);
183+
}
184+
} while(!done);
177185
}
178186

179187
void sync()

src/EWBinOp.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -456,7 +456,7 @@ struct DeferredEWBinOp : public Deferred
456456
#endif
457457
}
458458

459-
::mlir::Value generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
459+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
460460
{
461461
// FIXME the type of the result is based on a only
462462
auto rtyp = ivm[_a].first.getType();
@@ -465,7 +465,7 @@ struct DeferredEWBinOp : public Deferred
465465
this->set_value(std::move(mk_tnsr(_dtype, rank, allocated, aligned, offset, sizes, strides)));
466466
};
467467
ivm[_guid] = {ewbo, setter};
468-
return ewbo;
468+
return false;
469469
}
470470

471471
FactoryId factory() const

src/ReduceOp.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ struct DeferredReduceOp : public Deferred
116116
#endif
117117
}
118118

119-
::mlir::Value generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
119+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
120120
{
121121
// FIXME reduction over individual dimensions is not supported
122122
auto a = ivm[_a].first;
@@ -133,7 +133,7 @@ struct DeferredReduceOp : public Deferred
133133
this->set_value(std::move(mk_tnsr(_dtype, rank, allocated, aligned, offset, sizes, strides)));
134134
};
135135
ivm[_guid] = {rop, setter};
136-
return rop;
136+
return false;
137137
}
138138

139139
FactoryId factory() const

src/Service.cpp

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -35,13 +35,17 @@ struct DeferredService : public Deferred
3535
{
3636
enum Op : int {
3737
REPLICATE,
38-
DROP
38+
DROP,
39+
RUN,
40+
SERVICE_LAST
3941
};
4042

4143
id_type _a;
4244
Op _op;
4345

44-
DeferredService() = default;
46+
DeferredService(Op op = SERVICE_LAST)
47+
: _a(), _op(op)
48+
{}
4549
DeferredService(Op op, const tensor_i::future_type & a)
4650
: _a(a.id()), _op(op)
4751
{}
@@ -64,7 +68,7 @@ struct DeferredService : public Deferred
6468
#endif
6569
}
6670

67-
::mlir::Value generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
71+
bool generate_mlir(::mlir::OpBuilder & builder, ::mlir::Location loc, jit::IdValueMap & ivm) override
6872
{
6973
switch(_op) {
7074
case DROP:
@@ -73,11 +77,13 @@ struct DeferredService : public Deferred
7377
// FIXME create delete op and return it
7478
}
7579
break;
80+
case RUN:
81+
return true;
7682
default:
7783
throw(std::runtime_error("Unkown Service operation requested."));
7884
}
7985

80-
return {};
86+
return false;
8187
}
8288

8389
FactoryId factory() const
@@ -98,6 +104,11 @@ ddptensor * Service::replicate(const ddptensor & a)
98104
return new ddptensor(defer<DeferredService>(DeferredService::REPLICATE, a.get()));
99105
}
100106

107+
void Service::run()
108+
{
109+
defer_lambda([](){ return true; });
110+
}
111+
101112
extern bool inited;
102113

103114
void Service::drop(const ddptensor & a)

src/ddptensor.cpp

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -163,21 +163,30 @@ PYBIND11_MODULE(_ddptensor, m) {
163163
py::class_<LinAlgOp>(m, "LinAlgOp")
164164
.def("vecdot", &LinAlgOp::vecdot);
165165

166-
#define GET_REPL(_f) std::unique_ptr<ddptensor>(Service::replicate(f))->get().get()
166+
/// trigger compile&run and return given attribute _a
167+
#define SYNC_RETURN(_f, _a) Service::run(); return (_f).get().get()->_a()
168+
/// Replicate ddptensor/future and SYNC_RETURN attribute _a
169+
#define REPL_SYNC_RETURN(_f, _a) auto r_ = std::unique_ptr<ddptensor>(Service::replicate(f)); SYNC_RETURN(r_->get(), _a)
170+
167171
py::class_<ddptensor>(m, "DDPTFuture")
168-
.def_property_readonly("dtype", [](const ddptensor & f) { return f.get().get()->dtype(); })
169-
.def_property_readonly("shape", [](const ddptensor & f) { return f.get().get()->shape(); })
170-
.def_property_readonly("size", [](const ddptensor & f) { return f.get().get()->size(); })
171-
.def_property_readonly("ndim", [](const ddptensor & f) { return f.get().get()->ndim(); })
172-
.def("__bool__", [](const ddptensor & f) { return GET_REPL(f)->__bool__(); })
173-
.def("__float__", [](const ddptensor & f) { return GET_REPL(f)->__float__(); })
174-
.def("__int__", [](const ddptensor & f) { return GET_REPL(f)->__int__(); })
175-
.def("__index__", [](const ddptensor & f) { return GET_REPL(f)->__int__(); })
176-
.def("__len__", [](const ddptensor & f) { return f.get().get()->__len__(); })
177-
.def("__repr__", [](const ddptensor & f) { return f.get().get()->__repr__(); })
172+
// attributes we can get from the future itself
173+
.def_property_readonly("dtype", [](const ddptensor & f) { return f.get().dtype(); })
174+
.def_property_readonly("ndim", [](const ddptensor & f) { return f.get().rank(); })
175+
// attributes we can get from future without additional computation
176+
.def_property_readonly("shape", [](const ddptensor & f) { SYNC_RETURN(f, shape); })
177+
.def_property_readonly("size", [](const ddptensor & f) { SYNC_RETURN(f, size); })
178+
.def("__len__", [](const ddptensor & f) { SYNC_RETURN(f, __len__); })
179+
.def("__repr__", [](const ddptensor & f) { SYNC_RETURN(f, __repr__); })
180+
// attributes extracting values require replication
181+
.def("__bool__", [](const ddptensor & f) { REPL_SYNC_RETURN(f, __bool__); })
182+
.def("__float__", [](const ddptensor & f) { REPL_SYNC_RETURN(f, __float__); })
183+
.def("__int__", [](const ddptensor & f) { REPL_SYNC_RETURN(f, __int__); })
184+
.def("__index__", [](const ddptensor & f) { REPL_SYNC_RETURN(f, __int__); })
185+
// attributes returning a new ddptensor
178186
.def("__getitem__", &GetItem::__getitem__)
179187
.def("__setitem__", &SetItem::__setitem__);
180-
#undef GET_REPL
188+
#undef REPL_SYNC_RETURN
189+
#undef SYNC_RETURN
181190

182191
py::class_<Random>(m, "Random")
183192
.def("seed", &Random::seed)

src/include/ddptensor/Deferred.hpp

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,11 @@ struct Runable
1616
/// actually execute, a deferred will set value of future
1717
virtual void run() = 0;
1818
/// generate MLIR code for jit
19-
virtual ::mlir::Value generate_mlir(::mlir::OpBuilder &, ::mlir::Location, jit::IdValueMap &)
19+
/// @return true if last operation in to-be-compiled region, false otherwise
20+
virtual bool generate_mlir(::mlir::OpBuilder &, ::mlir::Location, jit::IdValueMap &)
2021
{
2122
throw(std::runtime_error("No MLIR support for this operation."));
22-
return {};
23+
return false;
2324
};
2425
virtual FactoryId factory() const = 0;
2526
virtual void defer(ptr_type &&);
@@ -144,6 +145,11 @@ struct DeferredLambda : public Runable
144145
_l();
145146
}
146147

148+
bool generate_mlir(::mlir::OpBuilder &, ::mlir::Location, jit::IdValueMap &)
149+
{
150+
return _l();
151+
}
152+
147153
FactoryId factory() const
148154
{
149155
throw(std::runtime_error("No Factory for DeferredLambda."));

src/include/ddptensor/Service.hpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,11 @@ class ddptensor;
66

77
struct Service
88
{
9+
/// replicate the given ddptensor on all ranks
910
static ddptensor * replicate(const ddptensor & a);
11+
/// start running/executing operations, e.g. trigger compile&run
12+
/// this is not blocking, use futures for synchronization
13+
static void run();
14+
/// signal that the given ddptensor is no longer needed and can be deleted
1015
static void drop(const ddptensor & a);
1116
};

third_party/xsimd

Lines changed: 0 additions & 1 deletion
This file was deleted.

third_party/xtensor

Lines changed: 0 additions & 1 deletion
This file was deleted.

0 commit comments

Comments
 (0)