Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

--convert-onnx-to-stablehlo bombs with garbage SmallVector size #2806

Open
PerMildner opened this issue Apr 24, 2024 · 0 comments
Open

--convert-onnx-to-stablehlo bombs with garbage SmallVector size #2806

PerMildner opened this issue Apr 24, 2024 · 0 comments

Comments

@PerMildner
Copy link

The attached input file crashes `onnx-mlir-opt --convert-onnx-to-stablehlo` (but not `--convert-onnx-to-tosa`).

module attributes {llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.target_triple = "x86_64-unknown-linux-gnu", "onnx-mlir.symbol-postfix" = "model.onnx-basic.mlir"} {
  func.func @main_graph(%arg0: tensor<2x3x4x5xf32> {onnx.name = "X"}, %arg1: tensor<4x5xf32> {onnx.name = "W"}, %arg2: tensor<4x5xf32> {onnx.name = "B"}) -> (tensor<2x3x4x5xf32> {onnx.name = "Y"}, tensor<2x3x1x1xf32> {onnx.name = "Mean"}, tensor<2x3x1x1xf32> {onnx.name = "InvStdDev"}) {
    %0 = onnx.Constant dense<9.99999974E-6> : tensor<f32>
    %1 = "onnx.Cast"(%0) {saturate = 1 : si64, to = f32} : (tensor<f32>) -> tensor<f32>
    %2 = "onnx.Shape"(%arg0) {start = 0 : si64} : (tensor<2x3x4x5xf32>) -> tensor<4xi64>
    %3 = "onnx.Size"(%2) : (tensor<4xi64>) -> tensor<i64>
    %4 = onnx.Constant dense<0> : tensor<1xi64>
    %5 = onnx.Constant dense<-2> : tensor<1xi64>
    %6 = "onnx.NoValue"() {value} : () -> none
    %7 = "onnx.NoValue"() {value} : () -> none
    %8 = "onnx.Slice"(%2, %4, %5, %6, %7) : (tensor<4xi64>, tensor<1xi64>, tensor<1xi64>, none, none) -> tensor<2xi64>
    %9 = "onnx.Neg"(%5) : (tensor<1xi64>) -> tensor<1xi64>
    %10 = onnx.ConstantOfShape(%9) {value = dense<1> : tensor<1xi64>} : (tensor<1xi64>) -> tensor<?xi64>
    %11 = "onnx.Concat"(%8, %10) {axis = 0 : si64} : (tensor<2xi64>, tensor<?xi64>) -> tensor<?xi64>
    %12 = "onnx.Flatten"(%arg0) {axis = -2 : si64} : (tensor<2x3x4x5xf32>) -> tensor<6x20xf32>
    %13 = "onnx.Cast"(%12) {saturate = 1 : si64, to = f32} : (tensor<6x20xf32>) -> tensor<6x20xf32>
    %14 = onnx.Constant dense<1> : tensor<1xi64>
    %15 = "onnx.ReduceMean"(%13, %14) {keepdims = 1 : si64, noop_with_empty_axes = 0 : si64} : (tensor<6x20xf32>, tensor<1xi64>) -> tensor<6x1xf32>
    %16 = "onnx.Mul"(%13, %13) : (tensor<6x20xf32>, tensor<6x20xf32>) -> tensor<6x20xf32>
    %17 = "onnx.ReduceMean"(%16, %14) {keepdims = 1 : si64, noop_with_empty_axes = 0 : si64} : (tensor<6x20xf32>, tensor<1xi64>) -> tensor<6x1xf32>
    %18 = "onnx.Mul"(%15, %15) : (tensor<6x1xf32>, tensor<6x1xf32>) -> tensor<6x1xf32>
    %19 = "onnx.Sub"(%17, %18) : (tensor<6x1xf32>, tensor<6x1xf32>) -> tensor<6x1xf32>
    %20 = "onnx.Add"(%19, %1) : (tensor<6x1xf32>, tensor<f32>) -> tensor<6x1xf32>
    %21 = "onnx.Sqrt"(%20) : (tensor<6x1xf32>) -> tensor<6x1xf32>
    %22 = "onnx.Sub"(%13, %15) : (tensor<6x20xf32>, tensor<6x1xf32>) -> tensor<6x20xf32>
    %23 = "onnx.Div"(%22, %21) : (tensor<6x20xf32>, tensor<6x1xf32>) -> tensor<6x20xf32>
    %24 = "onnx.Cast"(%23) {saturate = 1 : si64, to = f32} : (tensor<6x20xf32>) -> tensor<6x20xf32>
    %25 = "onnx.Flatten"(%arg1) {axis = 0 : si64} : (tensor<4x5xf32>) -> tensor<1x20xf32>
    %26 = "onnx.Mul"(%24, %25) : (tensor<6x20xf32>, tensor<1x20xf32>) -> tensor<6x20xf32>
    %27 = "onnx.Flatten"(%arg2) {axis = 0 : si64} : (tensor<4x5xf32>) -> tensor<1x20xf32>
    %28 = "onnx.Add"(%26, %27) : (tensor<6x20xf32>, tensor<1x20xf32>) -> tensor<6x20xf32>
    %29 = "onnx.Reshape"(%28, %2) {allowzero = 0 : si64} : (tensor<6x20xf32>, tensor<4xi64>) -> tensor<2x3x4x5xf32>
    %30 = "onnx.Reciprocal"(%21) : (tensor<6x1xf32>) -> tensor<6x1xf32>
    %31 = "onnx.Reshape"(%15, %11) {allowzero = 0 : si64} : (tensor<6x1xf32>, tensor<?xi64>) -> tensor<2x3x1x1xf32>
    %32 = "onnx.Reshape"(%30, %11) {allowzero = 0 : si64} : (tensor<6x1xf32>, tensor<?xi64>) -> tensor<2x3x1x1xf32>
    onnx.Return %29, %31, %32 : tensor<2x3x4x5xf32>, tensor<2x3x1x1xf32>, tensor<2x3x1x1xf32>
  }
  "onnx.EntryPoint"() {func = @main_graph} : () -> ()
}

The problem happens in

// src/Dialect/ONNX/ONNXOps/Tensor/Reshape.cpp:26
template <>
LogicalResult ONNXReshapeOpShapeHelper::computeShape() {
  ONNXReshapeOpAdaptor operandAdaptor(operands);
  DimsExpr outputDims;

  // Get info about input data operand.
  Value data = operandAdaptor.getData();
  int64_t dataRank = data.getType().cast<ShapedType>().getShape().size();

  // Get info about shape operand.
  Value shape = operandAdaptor.getShape();
  int64_t outputRank = createIE->getShape(shape, 0);
  assert(outputRank != -1 && "Shape tensor must have constant shape");
  // !!!! Here outputRank is outputRank = 0x8000000000000000, i.e. -9223372036854775808
  // Initialize context and results.
  outputDims.resize(outputRank); // <--- !!!! This crashes
  ...
}

Neither ASan nor valgrind complains.

Commit 4400cbc.

Full transcript attached.

bad_onnx.txt

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant