Skip to content
This repository was archived by the owner on Jan 26, 2026. It is now read-only.

Commit 1b52485

Browse files
authored
enabling tests with 2 mpi ranks (#23)
enabling tests with 2 mpi ranks
1 parent f0434ba commit 1b52485

File tree

10 files changed

+31
-12
lines changed

10 files changed

+31
-12
lines changed

.github/workflows/ci.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,7 @@ jobs:
132132
cd test
133133
pytest .
134134
DDPT_FORCE_DIST=1 pytest .
135+
mpirun -n 2 pytest .
135136
cd -
136137
- name: Run examples
137138
run: |

ddptensor/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,8 @@
3939

4040
_ddpt_cw = _bool(int(getenv("DDPT_CW", False)))
4141

42+
pi = 3.1415926535897932384626433
43+
4244

4345
def init(cw=None):
4446
cw = _ddpt_cw if cw is None else cw

imex_version.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
865cea0c1af5fe7ac1458907637c28f69afd7146
1+
e9b8ef1bb2126a4aa544ae1b25eae33f7f9a2d80

src/DDPTensorImpl.cpp

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,23 @@ void *DDPTensorImpl::data() {
115115
return ret;
116116
}
117117

118+
bool DDPTensorImpl::is_sliced() const {
119+
if (ndims() == 0)
120+
return false;
121+
auto d = ndims() - 1;
122+
intptr_t tsz = _strides[d];
123+
if (tsz == 1) {
124+
for (; d > 0; --d) {
125+
tsz *= _sizes[d];
126+
if (tsz <= 0)
127+
break;
128+
if (_strides[d - 1] > tsz)
129+
return true;
130+
}
131+
}
132+
return false;
133+
}
134+
118135
std::string DDPTensorImpl::__repr__() const {
119136
const auto nd = ndims();
120137
std::ostringstream oss;

src/IO.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,5 +12,5 @@
1212

1313
GetItem::py_future_type IO::to_numpy(const ddptensor &a) {
1414
assert(!getTransceiver()->is_cw() || getTransceiver()->rank() == 0);
15-
return GetItem::gather(a, 0);
15+
return GetItem::gather(a, getTransceiver()->is_cw() ? 0 : REPLICATED);
1616
}

src/idtr.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -349,7 +349,7 @@ void _idtr_reshape(DTypeId ddpttype, int64_t lRank, int64_t *gShapePtr,
349349
for (auto i = 0; i < N; ++i) {
350350
dspl[i] = 4 * i;
351351
}
352-
tc->gather(buff.data(), counts.data(), dspl.data(), ddpttype, REPLICATED);
352+
tc->gather(buff.data(), counts.data(), dspl.data(), INT64, REPLICATED);
353353

354354
// compute overlaps of current parts with requested parts
355355
// and store meta for alltoall
@@ -409,7 +409,7 @@ void _idtr_reshape(int64_t gShapeRank, void *gShapeDescr, int64_t lOffsRank,
409409

410410
_idtr_reshape(ddpttype, lRank, MRIdx1d(gShapeRank, gShapeDescr).data(),
411411
lData.data(), lData.sizes(), lData.strides(),
412-
MRIdx1d(oOffsRank, oOffsDescr).data(), oRank,
412+
MRIdx1d(lOffsRank, lOffsDescr).data(), oRank,
413413
MRIdx1d(oGShapeRank, oGShapeDescr).data(), oData.data(),
414414
oData.sizes(), MRIdx1d(oOffsRank, oOffsDescr).data(), tc);
415415
}
@@ -465,7 +465,7 @@ void _idtr_repartition(DTypeId ddpttype, int64_t rank, void *lDataPtr,
465465
for (auto i = 0; i < N; ++i) {
466466
dspl[i] = 2 * rank * i;
467467
}
468-
tc->gather(buff.data(), counts.data(), dspl.data(), ddpttype, REPLICATED);
468+
tc->gather(buff.data(), counts.data(), dspl.data(), INT64, REPLICATED);
469469

470470
// compute overlap of my local data with each requested part
471471

src/include/ddptensor/CppTypes.hpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,9 @@ inline RedOpType red_op(const char *op) {
152152
throw std::logic_error("unsupported reduction operation");
153153
}
154154

155+
/// denotes the full dimension, like when using '[:]' in array subscription
156+
constexpr auto ALL_SIZE = std::numeric_limits<int64_t>::max();
157+
155158
inline shape_type reduce_shape(const shape_type &shape,
156159
const dim_vec_type &dims) {
157160
auto ssz = shape.size();

src/include/ddptensor/DDPTensorImpl.hpp

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -76,10 +76,7 @@ class DDPTensorImpl : public tensor_i {
7676
void *data();
7777

7878
/// @return true if tensor is sliced
79-
bool is_sliced() const {
80-
assert(!"Not implemented");
81-
return false;
82-
}
79+
bool is_sliced() const;
8380

8481
/// python object's __repr__
8582
virtual std::string __repr__() const override;

src/include/ddptensor/PyTypes.hpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -126,8 +126,7 @@ template <typename T> T to_native(const py::object &o) { return o.cast<T>(); }
126126
inline void compute_slice(const py::slice &slc, uint64_t &offset,
127127
uint64_t &size, uint64_t &stride) {
128128
uint64_t dmy = 0;
129-
slc.compute(std::numeric_limits<int64_t>::max(), &offset, &dmy, &stride,
130-
&size);
129+
slc.compute(ALL_SIZE, &offset, &dmy, &stride, &size);
131130
}
132131

133132
#if 0

test/test_ewb.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ def test_add1(self):
1818
a = dt.ones((6, 6), dtype=dtyp)
1919
b = dt.ones((6, 6), dtype=dtyp)
2020
c = a + b
21-
r1 = dt.sum(c, [0, 1])
21+
r1 = dt.sum(c)
2222
v = 6 * 6 * 2
2323
assert float(r1) == v
2424

0 commit comments

Comments
 (0)