[AutoBump] Merge with 356540a (Jan 07)
mgehre-amd committed Feb 13, 2025
2 parents: ec59592 + 356540a · commit af54cba
Showing 5 changed files with 68 additions and 10 deletions.
9 changes: 2 additions & 7 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp
@@ -3513,8 +3513,8 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
 
         if (mode != "nearest" && mode != "linear")
           return rewriter.notifyMatchFailure(
-              binder.op, "unsupported interpolation mode other than nearest, "
-                         "linear");
+              binder.op,
+              R"(Expected valid interpolation mode: "nearest" | "linear")");
 
         int64_t resultRank = resultType.getSizes().size();
         if (resultRank > 5)
@@ -4669,11 +4669,6 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
       SmallVector<Type> scanOutTypes;
       for (unsigned i = numInits; i < resultTypes.size(); i++) {
         auto scanOutTy = cast<Torch::ValueTensorType>(resultTypes[i]);
-        // TODO: Handle dynamic result types.
-        if (!scanOutTy.hasSizes() || !scanOutTy.areAllSizesKnown()) {
-          return rewriter.notifyMatchFailure(
-              binder.op, "Expects result type to be static");
-        }
         Value sizeList =
             createConstantIntList(binder, rewriter, scanOutTy.getSizes());
         initVals.push_back(Torch::createInitTensor(rewriter, loc, scanOutTy,
16 changes: 16 additions & 0 deletions lib/Conversion/TorchToLinalg/Utils.cpp
@@ -116,6 +116,22 @@ Value torch_to_linalg::getOutputDimForConvOps(OpBuilder &b, Location loc,
   else
     division = b.createOrFold<arith::FloorDivSIOp>(loc, dividend, strideInt);
   Value out = b.createOrFold<arith::AddIOp>(loc, division, c1);
+
+  if (ceilMode) {
+    Value outMinusOneTimesStride =
+        b.createOrFold<arith::MulIOp>(loc, division, strideInt);
+    Value inAddLeftPadding = b.createOrFold<arith::AddIOp>(
+        loc, castIndexToInt64(b, loc, in), paddingInt);
+
+    auto reduceOutputDimCond =
+        b.createOrFold<arith::CmpIOp>(loc, arith::CmpIPredicate::uge,
+                                      outMinusOneTimesStride, inAddLeftPadding);
+
+    auto reducedDim = b.createOrFold<arith::SelectOp>(loc, reduceOutputDimCond,
+                                                      division, out);
+    return castIntToIndex(b, loc, reducedDim);
+  }
+
   return castIntToIndex(b, loc, out);
 }
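For readers following the lowering change: the new ceilMode branch drops the extra output window whenever that window would start at or beyond the end of the left-padded input, matching PyTorch's pooling semantics. A minimal Python sketch of the computation (the helper name and the symmetric-padding assumption are mine, not part of torch-mlir):

```python
import math


def pool_output_dim(in_dim, kernel, stride, padding, dilation=1, ceil_mode=False):
    # Sketch of getOutputDimForConvOps after this commit; `padding` is the
    # one-sided (left) padding, assumed here to be applied symmetrically.
    dividend = in_dim + 2 * padding - dilation * (kernel - 1) - 1
    division = math.ceil(dividend / stride) if ceil_mode else dividend // stride
    out = division + 1
    # Ceil-mode correction from this commit: if the last window would start
    # past the left-padded input, i.e. (out - 1) * stride >= in_dim + padding,
    # return division (= out - 1) instead of out.
    if ceil_mode and division * stride >= in_dim + padding:
        return division
    return out


# Height/width dims of the new e2e test: 20x10 input, kernel 6, stride 6, pad 3.
assert pool_output_dim(20, 6, 6, 3, ceil_mode=True) == 4  # was 5 before the fix
assert pool_output_dim(10, 6, 6, 3, ceil_mode=True) == 3
```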

8 changes: 5 additions & 3 deletions lib/Conversion/TorchToTosa/TorchToTosa.cpp
@@ -5595,9 +5595,11 @@ class ConvertAtenPoolingBaseOp : public OpConversionPattern<AtenOpT> {
     } else {
       int64_t dimSize =
           inputDim + padBefore + padAfter - dilation * (kernelDim - 1) - 1;
-      if (ceilMode && (dimSize % stride != 0))
-        return dimSize / stride + 2;
-      return dimSize / stride + 1;
+      int64_t outputDim = dimSize / stride + 1;
+      if (ceilMode && (dimSize % stride != 0) &&
+          (outputDim * stride < inputDim + padBefore))
+        outputDim++;
+      return outputDim;
     }
   }
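The static TOSA path now applies the same correction at compile time. Plugging in the height dimension of the new test case (a worked check using the diff's own values, not code from the commit):

```python
# inputDim=20, padBefore=padAfter=3, kernelDim=6, stride=6, dilation=1
dim_size = 20 + 3 + 3 - 1 * (6 - 1) - 1  # = 20
output_dim = dim_size // 6 + 1           # = 4
# ceil_mode holds and 20 % 6 != 0, but 4 * 6 = 24 is not < 20 + 3 = 23,
# so the extra window is not added; the old code returned 20 // 6 + 2 == 5.
assert output_dim == 4
```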

16 changes: 16 additions & 0 deletions projects/pt1/e2e_testing/xfail_sets.py
@@ -829,6 +829,7 @@
     "LenStrModule_basic",
     "MaxPool2dCeilModeTrueModule_basic",
     "MaxPool2dStaticCeilModeTrueModule_basic",
+    "MaxPool2dStaticCeilModeTrueReduceOutputModule_basic",
     "MaxPool2dWithIndicesBackwardDynamic3DModule_basic",
     "MaxPool2dWithIndicesBackwardDynamic4DModule_basic",
     "MaxPool2dWithIndicesBackwardStatic3DModule_basic",
@@ -2371,6 +2372,7 @@
     "MatmulStaticBroadcast_basic",
     "MaxPool2dEmptyStrideStaticModule_basic",
     "MaxPool2dStaticCeilModeTrueModule_basic",
+    "MaxPool2dStaticCeilModeTrueReduceOutputModule_basic",
     "MaxPool2dStaticModule_basic",
     "MeanModule_basic",
     "MmDagModule_basic",
@@ -3503,6 +3505,13 @@
     "ScaledDotProductAttentionBoolMaskModule_basic",
 }
 
+if torch_version_for_comparison() > version.parse("2.5.1"):
+    ONNX_XFAIL_SET = ONNX_XFAIL_SET | {
+        # error: 'memref.cast' op operand type 'memref<2x6x4x3xf32>' and result type 'memref<2x6x5x3xf32>' are cast incompatible
+        # torch.onnx.export produces onnx.MaxPool op with incorrect output shape of 2x6x5x3 instead of 2x6x4x3
+        "MaxPool2dStaticCeilModeTrueReduceOutputModule_basic",
+    }
+
 if torch_version_for_comparison() < version.parse("2.4.0.dev"):
     STABLEHLO_PASS_SET = STABLEHLO_PASS_SET - {
         "AtenIntMM_basic",
@@ -5152,3 +5161,10 @@
     "_LogSoftmaxModule_basic",
     "_SoftmaxModule_basic",
 }
+
+if torch_version_for_comparison() > version.parse("2.5.1"):
+    ONNX_TOSA_XFAIL_SET = ONNX_TOSA_XFAIL_SET | {
+        # error: 'memref.cast' op operand type 'memref<2x6x4x3xf32>' and result type 'memref<2x6x5x3xf32>' are cast incompatible
+        # torch.onnx.export produces onnx.MaxPool op with incorrect output shape of 2x6x5x3 instead of 2x6x4x3
+        "MaxPool2dStaticCeilModeTrueReduceOutputModule_basic",
+    }
29 changes: 29 additions & 0 deletions projects/pt1/python/torch_mlir_e2e_test/test_suite/pooling.py
@@ -420,6 +420,35 @@ def MaxPool2dCeilModeTrueModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 1, 20, 20, low=0.5, high=1.0))
 
 
+class MaxPool2dStaticCeilModeTrueReduceOutputModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.mp2d = torch.nn.MaxPool2d(
+            kernel_size=6,
+            stride=6,
+            padding=3,
+            dilation=1,
+            ceil_mode=True,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([2, 6, 20, 10], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.mp2d(x)
+
+
+@register_test_case(
+    module_factory=lambda: MaxPool2dStaticCeilModeTrueReduceOutputModule()
+)
+def MaxPool2dStaticCeilModeTrueReduceOutputModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(2, 6, 20, 10, low=0.5, high=1.0))
+
+
 # ==============================================================================
 
 
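The expected result shape for this module can be sanity-checked against eager PyTorch, independent of the e2e harness (a quick check, not part of the commit):

```python
import torch

pool = torch.nn.MaxPool2d(
    kernel_size=6, stride=6, padding=3, dilation=1, ceil_mode=True
)
out = pool(torch.rand(2, 6, 20, 10))
# 2x6x4x3, the shape the xfail comments say onnx.MaxPool gets wrong (2x6x5x3).
assert out.shape == (2, 6, 4, 3)
```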
