18 changes: 13 additions & 5 deletions src/tirx/op/op.cc
@@ -994,11 +994,19 @@ PrimExpr min(PrimExpr source, ffi::Array<IterVar> rdom, ffi::Array<PrimExpr> init
 }
 
 PrimExpr prod(PrimExpr source, ffi::Array<IterVar> rdom, ffi::Array<PrimExpr> init, Span span) {
-  Var x("x", source.dtype(), span), y("y", source.dtype(), span);
-  PrimExpr result = tirx::Mul(x, y, span);
-  PrimExpr identity_element = make_const(source.dtype(), 1, span);
-  tirx::CommReducer combiner = tirx::CommReducer({x}, {y}, {result}, {identity_element}, span);
-  return tirx::Reduce(combiner, {source}, rdom, make_const(DataType::Bool(), true), 0, init, span);
+  if (source.dtype().is_bool()) {
+    // Bool product (prod) has the same truth table as logical AND. Reuse all() to
+    // avoid lowering bool prod through Mul, which LLVM codegen does not support.
+    return all(source, rdom, init, span);
+  } else {
+    // For non-bool types, we lower prod through Mul.
+    Var x("x", source.dtype(), span), y("y", source.dtype(), span);
+    PrimExpr result = tirx::Mul(x, y, span);
+    PrimExpr identity_element = make_const(source.dtype(), 1, span);
+    tirx::CommReducer combiner = tirx::CommReducer({x}, {y}, {result}, {identity_element}, span);
+    return tirx::Reduce(combiner, {source}, rdom, make_const(DataType::Bool(), true), 0, init,
+                        span);
+  }
 }
 
 // fmod
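Aside (my illustration, not part of the diff): over boolean values a product is 1 exactly when every factor is 1, so prod and logical AND agree on every input, which is why routing bool prod through all() is semantically safe. A quick exhaustive check in plain Python:

import math
from itertools import product as cartesian

# Compare the two lowerings on every 3-bit boolean input:
# multiplying the values (the Mul-based reducer) vs. logical AND (all()).
for bits in cartesian([False, True], repeat=3):
    via_mul = math.prod(int(b) for b in bits)  # product lowering
    via_and = all(bits)                        # AND lowering
    assert bool(via_mul) == via_and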
18 changes: 11 additions & 7 deletions tests/python/relax/test_frontend_from_exported_program.py
@@ -7724,24 +7724,28 @@ def main(
     verify_model(VarCorrection0(), example_args, {}, Expected0)
 
 
-def test_prod():
+@pytest.mark.parametrize(
+    "torch_dtype,relax_dtype",
+    [(torch.float32, "float32"), (torch.bool, "bool")],
+)
+def test_prod(torch_dtype, relax_dtype):
     class Prod(Module):
         def forward(self, x):
-            return torch.prod(x)
+            return torch.prod(x, dtype=torch_dtype)
 
     @tvm.script.ir_module
     class Expected:
         @R.function
         def main(
-            x: R.Tensor((5, 3), dtype="float32"),
-        ) -> R.Tuple(R.Tensor((), dtype="float32")):
+            x: R.Tensor((5, 3), dtype=relax_dtype),
+        ) -> R.Tuple(R.Tensor((), dtype=relax_dtype)):
             with R.dataflow():
-                lv: R.Tensor((), dtype="float32") = R.prod(x, axis=None, keepdims=False)
-                gv: R.Tuple(R.Tensor((), dtype="float32")) = (lv,)
+                lv: R.Tensor((), dtype=relax_dtype) = R.prod(x, axis=None, keepdims=False)
+                gv: R.Tuple(R.Tensor((), dtype=relax_dtype)) = (lv,)
                 R.output(gv)
                 return gv
 
-    example_args = (torch.randn(5, 3, dtype=torch.float32),)
+    example_args = (torch.ones(5, 3, dtype=torch_dtype),)
     verify_model(Prod(), example_args, {}, Expected)
 
 
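One detail worth noting in this hunk (my aside, not from the PR): the example input moves from torch.randn to torch.ones because randn only produces floating-point tensors and cannot construct a bool input. A minimal sanity check of the bool path, assuming torch.prod accepts dtype=torch.bool as the parametrized test does:

import torch

# torch.randn(5, 3, dtype=torch.bool) would raise, since randn is
# floating-point only; torch.ones accepts bool and gives an all-True input.
x = torch.ones(5, 3, dtype=torch.bool)
assert torch.prod(x, dtype=torch.bool).item() is True  # all-ones -> True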
18 changes: 11 additions & 7 deletions tests/python/relax/test_frontend_from_fx.py
@@ -6231,24 +6231,28 @@ def main(
     verify_model(Var(), [([5, 3], "float32")], {}, Expected)
 
 
-def test_prod():
+@pytest.mark.parametrize(
+    "torch_dtype,relax_dtype",
+    [(torch.float32, "float32"), (torch.bool, "bool")],
+)
+def test_prod(torch_dtype, relax_dtype):
     class Prod(Module):
         def forward(self, x):
-            return torch.prod(x)
+            return torch.prod(x, dtype=torch_dtype)
 
     @tvm.script.ir_module
     class Expected:
         @R.function
         def main(
-            inp_0: R.Tensor((5, 3), dtype="float32"),
-        ) -> R.Tensor((), dtype="float32"):
+            inp_0: R.Tensor((5, 3), dtype=relax_dtype),
+        ) -> R.Tensor((), dtype=relax_dtype):
            with R.dataflow():
-                lv: R.Tensor((), dtype="float32") = R.prod(inp_0, axis=None, keepdims=False)
-                gv: R.Tensor((), dtype="float32") = lv
+                lv: R.Tensor((), dtype=relax_dtype) = R.prod(inp_0, axis=None, keepdims=False)
+                gv: R.Tensor((), dtype=relax_dtype) = lv
                 R.output(gv)
                 return gv
 
-    verify_model(Prod(), [([5, 3], "float32")], {}, Expected)
+    verify_model(Prod(), [([5, 3], relax_dtype)], {}, Expected)
 
 
 def test_cumprod():
2 changes: 1 addition & 1 deletion tests/python/relax/test_frontend_onnx.py
@@ -4924,7 +4924,7 @@ def test_nms_max_output_boxes_per_class_zero(with_explicit_max: bool):
     check_correctness(model, inputs=inputs, opset=11)
 
     tvm_out = run_in_tvm(model, inputs=inputs, opset=11)
-    tvm_selected = tvm_out[0].numpy() if isinstance(tvm_out, (list, tuple)) else tvm_out.numpy()
+    tvm_selected = tvm_out[0].numpy() if isinstance(tvm_out, list | tuple) else tvm_out.numpy()
Contributor commented (severity: high):

The use of the pipe operator (|) for type unions in isinstance is a Python 3.10+ feature (PEP 604). TVM currently maintains compatibility with older Python versions (e.g., 3.8 and 3.9), and this change will cause a TypeError in those environments. Please revert to the compatible tuple syntax: isinstance(tvm_out, (list, tuple)). Additionally, this change appears to be an unrelated drive-by modification.

Suggested change:
-    tvm_selected = tvm_out[0].numpy() if isinstance(tvm_out, list | tuple) else tvm_out.numpy()
+    tvm_selected = tvm_out[0].numpy() if isinstance(tvm_out, (list, tuple)) else tvm_out.numpy()
Contributor (author) replied:

Since TVM now requires Python 3.10+, this compatibility issue is no longer relevant.
     assert tvm_selected.shape == (0, 3)
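To make the version dependency behind this thread concrete (my illustration, not part of the discussion): isinstance accepts a PEP 604 union object like list | tuple only on Python 3.10 and later; earlier interpreters raise TypeError at the call site.

import sys

x = [1, 2, 3]
print(isinstance(x, (list, tuple)))  # tuple syntax: works on every Python 3 version

if sys.version_info >= (3, 10):
    # PEP 604 union: only valid as an isinstance() target on 3.10+
    print(isinstance(x, list | tuple))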


@@ -557,6 +557,39 @@ def prod(rxplaceholder: T.Buffer((T.int64(2), T.int64(3), T.int64(4), T.int64(5)
     tvm.ir.assert_structural_equal(mod, Expected)
 
 
+def test_prod_bool():
+    # fmt: off
+    @tvm.script.ir_module
+    class Prod:
+        @R.function
+        def main(x: R.Tensor((2, 3, 4, 5), "bool")) -> R.Tensor((1, 1, 1, 1), "bool"):
+            gv: R.Tensor((1, 1, 1, 1), "bool") = R.prod(x, keepdims=True)
+            return gv
+
+    @tvm.script.ir_module
+    class Expected:
+        @R.function
+        def main(x: R.Tensor((2, 3, 4, 5), "bool")) -> R.Tensor((1, 1, 1, 1), "bool"):
+            gv = R.call_tir(Expected.prod, (x,), R.Tensor((1, 1, 1, 1), dtype="bool"))
+            return gv
+
+        @T.prim_func(private=True)
+        def prod(rxplaceholder: T.Buffer((T.int64(2), T.int64(3), T.int64(4), T.int64(5)), "bool"), rxplaceholder_red: T.Buffer((T.int64(1), T.int64(1), T.int64(1), T.int64(1)), "bool")):
+            T.func_attr({"tirx.noalias": True})
+            for i0, i1, i2, i3, i4, i5, i6, i7 in T.grid(T.int64(1), T.int64(1), T.int64(1), T.int64(1), T.int64(2), T.int64(3), T.int64(4), T.int64(5)):
+                with T.sblock("rxplaceholder_red"):
+                    ax0, ax1, ax2, ax3, k0, k1, k2, k3 = T.axis.remap("SSSSRRRR", [i0, i1, i2, i3, i4, i5, i6, i7])
+                    T.reads(rxplaceholder[k0, k1, k2, k3])
+                    T.writes(rxplaceholder_red[ax0, ax1, ax2, ax3])
+                    with T.init():
+                        rxplaceholder_red[ax0, ax1, ax2, ax3] = T.bool(1)
+                    rxplaceholder_red[ax0, ax1, ax2, ax3] = rxplaceholder_red[ax0, ax1, ax2, ax3] and rxplaceholder[k0, k1, k2, k3]
+    # fmt: on
+
+    mod = LegalizeOps()(Prod)
+    tvm.ir.assert_structural_equal(mod, Expected)
+
+
 def test_prod_symbolic():
     # fmt: off
     @tvm.script.ir_module
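As a plain-Python reading of what the expected prim_func computes (my sketch, not in the PR): it is an AND-reduction over all four axes with keepdims, and the T.init() value T.bool(1), i.e. True, is the identity element of AND. A NumPy reference for the same semantics:

import numpy as np

# The expected kernel reduces every axis with logical AND, starting from
# the identity True, and keeps the reduced axes as size-1 dims.
x = np.random.rand(2, 3, 4, 5) > 0.5
ref = np.all(x, axis=(0, 1, 2, 3), keepdims=True)  # dtype bool
assert ref.shape == (1, 1, 1, 1)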