Skip to content
This repository was archived by the owner on Jan 26, 2026. It is now read-only.

Commit c1aef79

Browse files
committed
Revert the use of lambdas: replace `defer(lambda)` calls with dedicated `Deferred` task structs
1 parent 90e56f6 commit c1aef79

File tree

9 files changed

+152
-52
lines changed

9 files changed

+152
-52
lines changed

src/Creator.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ struct DeferredArange : public Deferred
106106

107107
void run()
108108
{
109-
set_value(TypeDispatch<x::Creator>(_dtype, _start, _end, _step));
109+
set_value(std::move(TypeDispatch<x::Creator>(_dtype, _start, _end, _step)));
110110
};
111111
};
112112

src/EWBinOp.cpp

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -133,15 +133,29 @@ namespace x {
133133
};
134134
} // namespace x
135135

136+
struct DeferredEWBinOp : public Deferred
137+
{
138+
tensor_i::future_type _a;
139+
tensor_i::future_type _b;
140+
EWBinOpId _op;
141+
142+
DeferredEWBinOp(EWBinOpId op, const tensor_i::future_type & a, const tensor_i::future_type & b)
143+
: _a(a), _b(b), _op(op)
144+
{}
145+
146+
void run()
147+
{
148+
const auto a = std::move(_a.get());
149+
const auto b = std::move(_b.get());
150+
set_value(std::move(TypeDispatch<x::EWBinOp>(a, b, _op)));
151+
}
152+
};
153+
136154
tensor_i::future_type EWBinOp::op(EWBinOpId op, const tensor_i::future_type & a, const py::object & b)
137155
{
138156
auto bb = x::mk_ftx(b);
139157
if(op == __MATMUL__) {
140158
return LinAlgOp::vecdot(a, bb, 0);
141159
}
142-
auto aa = std::move(a.get());
143-
auto bbb = std::move(bb.get());
144-
return defer([op, aa, bbb](){
145-
return TypeDispatch<x::EWBinOp>(aa, bbb, op);
146-
});
160+
return defer<DeferredEWBinOp>(op, a, bb);
147161
}

src/EWUnyOp.cpp

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -108,10 +108,23 @@ namespace x {
108108
};
109109
} //namespace x
110110

111+
struct DeferredEWUnyOp : public Deferred
112+
{
113+
tensor_i::future_type _a;
114+
EWUnyOpId _op;
115+
116+
DeferredEWUnyOp(EWUnyOpId op, const tensor_i::future_type & a)
117+
: _a(a), _op(op)
118+
{}
119+
120+
void run()
121+
{
122+
const auto a = std::move(_a.get());
123+
set_value(std::move(TypeDispatch<x::EWUnyOp>(a, _op)));
124+
}
125+
};
126+
111127
tensor_i::future_type EWUnyOp::op(EWUnyOpId op, const tensor_i::future_type & a)
112128
{
113-
auto aa = std::move(a.get());
114-
return defer([op, aa](){
115-
return TypeDispatch<x::EWUnyOp>(aa, op);
116-
});
129+
return defer<DeferredEWUnyOp>(op, a);
117130
}

src/IEWBinOp.cpp

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -73,12 +73,25 @@ namespace x {
7373
};
7474
} // namespace x
7575

76+
struct DeferredIEWBinOp : public Deferred
77+
{
78+
tensor_i::future_type _a;
79+
tensor_i::future_type _b;
80+
IEWBinOpId _op;
81+
82+
DeferredIEWBinOp(IEWBinOpId op, tensor_i::future_type & a, const tensor_i::future_type & b)
83+
: _a(a), _b(b), _op(op)
84+
{}
85+
86+
void run()
87+
{
88+
const auto a = std::move(_a.get());
89+
const auto b = std::move(_b.get());
90+
set_value(std::move(TypeDispatch<x::IEWBinOp>(a, b, _op)));
91+
}
92+
};
93+
7694
tensor_i::future_type IEWBinOp::op(IEWBinOpId op, tensor_i::future_type & a, const py::object & b)
7795
{
78-
auto bb = x::mk_ftx(b);
79-
auto aa = std::move(a.get());
80-
auto bbb = std::move(bb.get());
81-
return defer([op, aa, bbb](){
82-
return TypeDispatch<x::IEWBinOp>(aa, bbb, op);
83-
});
96+
return defer<DeferredIEWBinOp>(op, a, x::mk_ftx(b));
8497
}

src/LinAlgOp.cpp

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -109,11 +109,25 @@ namespace x {
109109
};
110110
}
111111

112+
struct DeferredLinAlgOp : public Deferred
113+
{
114+
tensor_i::future_type _a;
115+
tensor_i::future_type _b;
116+
int _axis;
117+
118+
DeferredLinAlgOp(const tensor_i::future_type & a, const tensor_i::future_type & b, int axis)
119+
: _a(a), _b(b), _axis(axis)
120+
{}
121+
122+
void run()
123+
{
124+
const auto a = std::move(_a.get());
125+
const auto b = std::move(_b.get());
126+
set_value(std::move(TypeDispatch<x::LinAlgOp>(a, b, _axis)));
127+
}
128+
};
129+
112130
tensor_i::future_type LinAlgOp::vecdot(const tensor_i::future_type & a, const tensor_i::future_type & b, int axis)
113131
{
114-
auto aa = std::move(a.get());
115-
auto bb = std::move(b.get());
116-
return defer([aa, bb, axis](){
117-
return TypeDispatch<x::LinAlgOp>(aa, bb, axis);
118-
});
132+
return defer<DeferredLinAlgOp>(a, b, axis);
119133
}

src/ManipOp.cpp

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,23 @@ namespace x {
2323
};
2424
}
2525

26+
struct DeferredManipOp : public Deferred
27+
{
28+
tensor_i::future_type _a;
29+
shape_type _shape;
30+
31+
DeferredManipOp(const tensor_i::future_type & a, const shape_type & shape)
32+
: _a(a), _shape(shape)
33+
{}
34+
35+
void run()
36+
{
37+
const auto a = std::move(_a.get());
38+
set_value(std::move(TypeDispatch<x::ManipOp>(a, _shape)));
39+
}
40+
};
41+
2642
tensor_i::future_type ManipOp::reshape(const tensor_i::future_type & a, const shape_type & shape)
2743
{
28-
auto aa = std::move(a.get());
29-
return defer([aa, shape](){
30-
return TypeDispatch<x::ManipOp>(aa, shape);
31-
});
44+
return defer<DeferredManipOp>(a, shape);
3245
}

src/Random.cpp

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -31,24 +31,18 @@ struct DeferredRandomOp : public Deferred
3131

3232
void run()
3333
{
34-
set_value(x::Rand<T>::op(_shape, _lower, _upper));
34+
set_value(std::move(x::Rand<T>::op(_shape, _lower, _upper)));
3535
}
3636
};
3737

3838
Random::future_type Random::rand(DTypeId dtype, const shape_type & shape, const py::object & lower, const py::object & upper)
3939
{
4040
switch(dtype) {
4141
case FLOAT64: {
42-
double lo = x::to_native<double>(lower);
43-
double up = x::to_native<double>(upper);
44-
return defer([shape, lo, up](){return x::Rand<double>::op(shape, lo, up);});
45-
//return defer<DeferredRandomOp<double>>(shape, x::to_native<double>(lower), x::to_native<double>(upper));
42+
return defer<DeferredRandomOp<double>>(shape, x::to_native<double>(lower), x::to_native<double>(upper));
4643
}
4744
case FLOAT32: {
48-
float lo = x::to_native<float>(lower);
49-
float up = x::to_native<float>(upper);
50-
return defer([shape, lo, up](){return x::Rand<float>::op(shape, lo, up);});
51-
//return defer<DeferredRandomOp<float>>(shape, x::to_native<double>(lower), x::to_native<double>(upper));
45+
return defer<DeferredRandomOp<float>>(shape, x::to_native<float>(lower), x::to_native<float>(upper));
5246
}
5347
default:
5448
throw std::runtime_error("rand: dtype must be a floating point type");

src/ReduceOp.cpp

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -63,10 +63,24 @@ namespace x {
6363
};
6464
} // namespace x
6565

66+
struct DeferredReduceOp : public Deferred
67+
{
68+
tensor_i::future_type _a;
69+
dim_vec_type _dim;
70+
ReduceOpId _op;
71+
72+
DeferredReduceOp(ReduceOpId op, const tensor_i::future_type & a, const dim_vec_type & dim)
73+
: _a(a), _dim(dim), _op(op)
74+
{}
75+
76+
void run()
77+
{
78+
const auto a = std::move(_a.get());
79+
set_value(std::move(TypeDispatch<x::ReduceOp>(a, _op, _dim)));
80+
}
81+
};
82+
6683
tensor_i::future_type ReduceOp::op(ReduceOpId op, const tensor_i::future_type & a, const dim_vec_type & dim)
6784
{
68-
auto aa = std::move(a.get());
69-
return defer([aa, op, dim](){
70-
return TypeDispatch<x::ReduceOp>(aa, op, dim);
71-
});
85+
return defer<DeferredReduceOp>(op, a, dim);
7286
}

src/SetGetItem.cpp

Lines changed: 39 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -134,33 +134,58 @@ namespace x {
134134

135135
} // namespace x
136136

137+
struct DeferredSetItem : public Deferred
138+
{
139+
tensor_i::future_type _a;
140+
tensor_i::future_type _b;
141+
NDSlice _slc;
142+
143+
DeferredSetItem(tensor_i::future_type & a, const tensor_i::future_type & b, const std::vector<py::slice> & v)
144+
: _a(a), _b(b), _slc(v)
145+
{}
146+
147+
void run()
148+
{
149+
const auto a = std::move(_a.get());
150+
const auto b = std::move(_b.get());
151+
set_value(std::move(TypeDispatch<x::SetItem>(a, b, _slc)));
152+
}
153+
};
154+
137155
tensor_i::future_type SetItem::__setitem__(tensor_i::future_type & a, const std::vector<py::slice> & v, const tensor_i::future_type & b)
138156
{
139-
auto aa = std::move(a.get());
140-
auto bb = std::move(b.get());
141-
NDSlice _slc(v);
142-
return defer([aa, bb, _slc](){
143-
return TypeDispatch<x::SetItem>(aa, bb, _slc);
144-
});
157+
return defer<DeferredSetItem>(a, b, v);
145158
}
146159

147-
tensor_i::future_type GetItem::__getitem__(const tensor_i::future_type & a, const std::vector<py::slice> & v)
160+
struct DeferredGetItem : public Deferred
148161
{
149-
auto aa = std::move(a.get());
150-
NDSlice _slc(v);
151-
return defer([aa, _slc](){
152-
return TypeDispatch<x::GetItem>(aa, _slc);
153-
});
162+
tensor_i::future_type _a;
163+
NDSlice _slc;
164+
165+
DeferredGetItem(const tensor_i::future_type & a, const std::vector<py::slice> & v)
166+
: _a(a), _slc(v)
167+
{}
168+
169+
void run()
170+
{
171+
const auto a = std::move(_a.get());
172+
set_value(std::move(TypeDispatch<x::GetItem>(a, _slc)));
173+
}
174+
};
175+
176+
tensor_i::future_type GetItem::__getitem__(const tensor_i::future_type & a, const std::vector<py::slice> & v)
177+
{
178+
return defer<DeferredGetItem>(a, v);
154179
}
155180

156181
py::object GetItem::get_slice(const tensor_i::future_type & a, const std::vector<py::slice> & v)
157182
{
158-
const auto & aa = std::move(a.get());
183+
const auto aa = std::move(a.get());
159184
return TypeDispatch<x::SPMD>(aa, NDSlice(v));
160185
}
161186

162187
py::object GetItem::get_local(const tensor_i::future_type & a, py::handle h)
163188
{
164-
const auto & aa = std::move(a.get());
189+
const auto aa = std::move(a.get());
165190
return TypeDispatch<x::SPMD>(aa, h);
166191
}

0 commit comments

Comments
 (0)