Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Op function body: do not allow a default opset version #5908

Draft
wants to merge 2 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion onnx/cpp2py_export.cc
Original file line number Diff line number Diff line change
Expand Up @@ -395,7 +395,7 @@ PYBIND11_MODULE(onnx_cpp2py_export, onnx_cpp2py_export) {
}
FunctionBodyBuildContextImpl ctx(proto, input_types);
FunctionProto func_proto;
op->BuildContextDependentFunction(ctx, func_proto);
op->BuildContextDependentFunction(ctx, func_proto, op->SinceVersion());
func_proto.SerializeToString(&func_bytes);
}
return py::bytes(func_bytes);
Expand Down
9 changes: 6 additions & 3 deletions onnx/defs/generator/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -479,7 +479,8 @@ ONNX_OPERATOR_SET_SCHEMA(
"T",
{"tensor(float)", "tensor(double)", "tensor(int16)", "tensor(int32)", "tensor(int64)"},
"Constrain input types to common numeric type tensors.")
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
sub_result = Sub (limit, start)
sub_result_casted = Cast <to = 1> (sub_result)
Expand All @@ -496,7 +497,8 @@ ONNX_OPERATOR_SET_SCHEMA(
range = Identity (prev)
}>
}
)ONNX")
)ONNX",
11)
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
// Type inference
propagateElemTypeFromInputToOutput(ctx, 0, 0);
Expand Down Expand Up @@ -618,5 +620,6 @@ ONNX_OPERATOR_SET_SCHEMA(
.Add("output = Cast (X_greater)", "to", int64_t(dtype));
schema.BuildFunction(functionProto);
return true;
}));
},
15));
} // namespace ONNX_NAMESPACE
12 changes: 8 additions & 4 deletions onnx/defs/logical/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -196,13 +196,15 @@ ONNX_OPERATOR_SET_SCHEMA(
.TypeConstraint("T", OpSchema::all_numeric_types_ir4(), "Constrain input types to all numeric tensors.")
.TypeConstraint("T1", {"tensor(bool)"}, "Constrain output to boolean tensor.")
.TypeAndShapeInferenceFunction(InferenceFunction())
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
O1 = Less (A, B)
O2 = Equal (A, B)
C = Or (O1, O2)
}
)ONNX"));
)ONNX",
16));

ONNX_OPERATOR_SET_SCHEMA(
GreaterOrEqual,
Expand All @@ -212,13 +214,15 @@ ONNX_OPERATOR_SET_SCHEMA(
.TypeConstraint("T", OpSchema::all_numeric_types_ir4(), "Constrain input types to all numeric tensors.")
.TypeConstraint("T1", {"tensor(bool)"}, "Constrain output to boolean tensor.")
.TypeAndShapeInferenceFunction(InferenceFunction())
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
O1 = Greater (A, B)
O2 = Equal (A, B)
C = Or (O1, O2)
}
)ONNX"));
)ONNX",
16));

static const char* BitwiseNot_ver18_doc = R"DOC(
Returns the bitwise not of the input tensor element-wise.
Expand Down
12 changes: 8 additions & 4 deletions onnx/defs/logical/old.cc
Original file line number Diff line number Diff line change
Expand Up @@ -224,13 +224,15 @@ ONNX_OPERATOR_SET_SCHEMA(
.TypeConstraint("T", OpSchema::all_numeric_types(), "Constrain input types to all numeric tensors.")
.TypeConstraint("T1", {"tensor(bool)"}, "Constrain output to boolean tensor.")
.TypeAndShapeInferenceFunction(InferenceFunction())
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
O1 = Less (A, B)
O2 = Equal (A, B)
C = Or (O1, O2)
}
)ONNX"));
)ONNX",
12));

ONNX_OPERATOR_SET_SCHEMA(
GreaterOrEqual,
Expand All @@ -240,13 +242,15 @@ ONNX_OPERATOR_SET_SCHEMA(
.TypeConstraint("T", OpSchema::all_numeric_types(), "Constrain input types to all numeric tensors.")
.TypeConstraint("T1", {"tensor(bool)"}, "Constrain output to boolean tensor.")
.TypeAndShapeInferenceFunction(InferenceFunction())
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
O1 = Greater (A, B)
O2 = Equal (A, B)
C = Or (O1, O2)
}
)ONNX"));
)ONNX",
12));

ONNX_OPERATOR_SET_SCHEMA(
Equal,
Expand Down
55 changes: 35 additions & 20 deletions onnx/defs/math/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,8 @@ ONNX_OPERATOR_SET_SCHEMA(
{"tensor(bfloat16)", "tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors.")
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
Alpha = Constant <value_float: float = @alpha>()
AlphaCast = CastLike (Alpha, X)
Expand All @@ -339,7 +340,8 @@ ONNX_OPERATOR_SET_SCHEMA(
AlphaMulX = Mul (AlphaCast, X)
Y = Where (XLessThanZero, AlphaMulX, X)
}
)ONNX"));
)ONNX",
16));

static const char* ThresholdedRelu_ver10_doc = R"DOC(
ThresholdedRelu takes one input data (Tensor<T>) and produces one output data
Expand Down Expand Up @@ -483,13 +485,15 @@ ONNX_OPERATOR_SET_SCHEMA(
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input X and output types to float tensors.")
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
Softplus_X = Softplus (X)
TanHSoftplusX = Tanh (Softplus_X)
Y = Mul (X, TanHSoftplusX)
}
)ONNX")
)ONNX",
18)
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));

static const char* celu_ver12_doc = R"DOC(
Expand Down Expand Up @@ -557,7 +561,7 @@ ONNX_OPERATOR_SET_SCHEMA(
AttributeProto::FLOAT,
celu_default_alpha)
.TypeConstraint("T", {"tensor(float)"}, "Constrain input and output types to float32 tensors.")
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodyCelu)
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodyCelu, 12)
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));

static const char* gelu_ver20_doc = R"DOC(
Expand Down Expand Up @@ -641,7 +645,7 @@ ONNX_OPERATOR_SET_SCHEMA(
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)", "tensor(bfloat16)"},
"Constrain input and output types to float tensors.")
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodyGelu)
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodyGelu, 20)
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));

static const char* Exp_ver13_doc = R"DOC(
Expand Down Expand Up @@ -813,15 +817,17 @@ ONNX_OPERATOR_SET_SCHEMA(
"tensor(int64)"},
"Constrain input and output types to float/int tensors.")
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
Zero = Constant <value = float {0.0}>()
ZeroCast = CastLike(Zero, X)
XLessThanZero = Less (X, ZeroCast)
SlopeMulX = Mul (slope, X)
Y = Where(XLessThanZero, SlopeMulX, X)
}
)ONNX"));
)ONNX",
16));

static const char* Sigmoid_ver13_doc = R"DOC(
Sigmoid takes one input data (Tensor<T>) and produces one output data
Expand Down Expand Up @@ -899,12 +905,14 @@ ONNX_OPERATOR_SET_SCHEMA(
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors.")
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput)
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
HS_X = HardSigmoid<alpha = 0.16666667163372, beta = 0.5>(X)
Y = Mul (X, HS_X)
}
)ONNX"));
)ONNX",
14));

// Generate opschema for element-wise ops. Leaves type constraint "T"
// unspecified.
Expand Down Expand Up @@ -1067,7 +1075,7 @@ ONNX_OPERATOR_SET_SCHEMA(
"T",
OpSchema::all_numeric_types_ir4(),
"Constrain input and output types to all numeric tensors.")
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodyClip)
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodyClip, 13)
.TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput));

std::function<void(OpSchema&)>
Expand Down Expand Up @@ -1158,7 +1166,8 @@ ONNX_OPERATOR_SET_SCHEMA(

schema.BuildFunction(functionProto);
return true;
})
},
13)
// function body builder for opset version 18.
// ReduceSum is updated in opset 18 to have axes as the second input.
// Therefore function body for opset version 18
Expand Down Expand Up @@ -2505,7 +2514,7 @@ ONNX_OPERATOR_SET_SCHEMA(
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input, weight, and output types to floating-point tensors.")
.TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain target to integer types")
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBody)
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBody, 13)
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
// Type inference
propagateElemTypeFromInputToOutput(ctx, 0, 0);
Expand Down Expand Up @@ -2862,7 +2871,7 @@ ONNX_OPERATOR_SET_SCHEMA(
{"tensor(float16)", "tensor(float)", "tensor(double)", "tensor(bfloat16)"},
"Constrain input and output types to float tensors.")
.TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain target to integer types")
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodySCE)
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodySCE, 13)
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
propagateElemTypeFromInputToOutput(ctx, 0, 0);
std::string reduction = getAttribute(ctx, "reduction", "mean");
Expand Down Expand Up @@ -3164,7 +3173,8 @@ ONNX_OPERATOR_SET_SCHEMA(
.FillUsing(CosineSumWindowOpDocGenerator("Hann"))
.TypeConstraint("T1", {"tensor(int32)", "tensor(int64)"}, "Constrain the input size to int64_t.")
.TypeConstraint("T2", OpSchema::all_numeric_types_ir4(), "Constrain output types to numeric tensors.")
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
A0 = Constant <value = float {0.5}>()
A1 = Constant <value = float {0.5}>()
Expand Down Expand Up @@ -3193,7 +3203,8 @@ ONNX_OPERATOR_SET_SCHEMA(
Temp1 = Add (Temp0, A2_Component)
output = Cast <to : int = @output_datatype> (Temp1)
}
)ONNX"));
)ONNX",
17));

ONNX_OPERATOR_SET_SCHEMA(
HammingWindow,
Expand All @@ -3202,7 +3213,8 @@ ONNX_OPERATOR_SET_SCHEMA(
.FillUsing(CosineSumWindowOpDocGenerator("Hamming"))
.TypeConstraint("T1", {"tensor(int32)", "tensor(int64)"}, "Constrain the input size to int64_t.")
.TypeConstraint("T2", OpSchema::all_numeric_types_ir4(), "Constrain output types to numeric tensors.")
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
A0 = Constant <value = float {0.54347826087}>()
A1 = Constant <value = float {0.45652173913}>()
Expand Down Expand Up @@ -3231,7 +3243,8 @@ ONNX_OPERATOR_SET_SCHEMA(
Temp1 = Add (Temp0, A2_Component)
output = Cast <to : int = @output_datatype> (Temp1)
}
)ONNX"));
)ONNX",
17));

ONNX_OPERATOR_SET_SCHEMA(
BlackmanWindow,
Expand All @@ -3240,7 +3253,8 @@ ONNX_OPERATOR_SET_SCHEMA(
.FillUsing(CosineSumWindowOpDocGenerator("Blackman"))
.TypeConstraint("T1", {"tensor(int32)", "tensor(int64)"}, "Constrain the input size to int64_t.")
.TypeConstraint("T2", OpSchema::all_numeric_types_ir4(), "Constrain output types to numeric tensors.")
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
A0 = Constant <value = float {0.42}>()
A1 = Constant <value = float {0.5}>()
Expand Down Expand Up @@ -3269,7 +3283,8 @@ ONNX_OPERATOR_SET_SCHEMA(
Temp1 = Add (Temp0, A2_Component)
output = Cast <to : int = @output_datatype> (Temp1)
}
)ONNX"));
)ONNX",
17));

static const char* MelWeightMatrix_ver17_doc = R"DOC(
Generate a MelWeightMatrix that can be used to re-weight a Tensor containing a linearly sampled frequency spectra (from DFT or STFT) into num_mel_bins frequency information based on the [lower_edge_hertz, upper_edge_hertz] range on the mel scale.
Expand Down
4 changes: 2 additions & 2 deletions onnx/defs/math/old.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1280,7 +1280,7 @@ ONNX_OPERATOR_SET_SCHEMA(
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input, weight, and output types to floating-point tensors.")
.TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain target to integer types")
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBody_opset12)
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBody_opset12, 12)
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
// Type inference
propagateElemTypeFromInputToOutput(ctx, 0, 0);
Expand Down Expand Up @@ -1477,7 +1477,7 @@ ONNX_OPERATOR_SET_SCHEMA(
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors.")
.TypeConstraint("Tind", {"tensor(int32)", "tensor(int64)"}, "Constrain target to integer types")
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodySCE_opset12)
.SetContextDependentFunctionBodyBuilder(BuildContextDependentFunctionBodySCE_opset12, 12)
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
propagateElemTypeFromInputToOutput(ctx, 0, 0);
std::string reduction = getAttribute(ctx, "reduction", "mean");
Expand Down
9 changes: 6 additions & 3 deletions onnx/defs/nn/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2259,7 +2259,8 @@ ONNX_OPERATOR_SET_SCHEMA(
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)", "tensor(bfloat16)"},
"Constrain input and output types to all numeric tensors.")
.FunctionBody(R"ONNX(
.FunctionBody(
R"ONNX(
{
Exponent = Constant <value = float {2.0}>()
Epsilon = Constant <value = float {1e-9}>()
Expand All @@ -2273,7 +2274,8 @@ ONNX_OPERATOR_SET_SCHEMA(
Processed_STD = Add (STD, Epsilon)
Y = Div (X_variance, Processed_STD)
}
)ONNX")
)ONNX",
13)
.FunctionBody(
R"ONNX(
{
Expand Down Expand Up @@ -2849,5 +2851,6 @@ ONNX_OPERATOR_SET_SCHEMA(

schema.BuildFunction(functionProto);
return true;
}));
},
21));
} // namespace ONNX_NAMESPACE
31 changes: 17 additions & 14 deletions onnx/defs/nn/old.cc
Original file line number Diff line number Diff line change
Expand Up @@ -255,19 +255,21 @@
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to all numeric tensors.")
.FunctionBody(FunctionBodyHelper::BuildNodes(
{// nodes: {outputs, op, inputs, attributes}
FunctionBodyHelper::Const<float>("Exponent", 2.0f),
FunctionBodyHelper::Const<float>("Epsilon", float(1e-9)),
{{"X_RM"}, "ReduceMean", {"X"}, {MakeRefAttribute("axes", AttributeProto::INTS)}},
{{"EX_squared"}, "Pow", {"X_RM", "Exponent"}},
{{"X_squared"}, "Pow", {"X", "Exponent"}},
{{"E_Xsquared"}, "ReduceMean", {"X_squared"}, {MakeRefAttribute("axes", AttributeProto::INTS)}},
{{"Variance"}, "Sub", {"E_Xsquared", "EX_squared"}},
{{"STD"}, "Sqrt", {"Variance"}},
{{"X_variance"}, "Sub", {"X", "X_RM"}},
{{"Processed_STD"}, "Add", {"STD", "Epsilon"}},
{{"Y"}, "Div", {"X_variance", "Processed_STD"}}})));
.FunctionBody(
FunctionBodyHelper::BuildNodes(
{// nodes: {outputs, op, inputs, attributes}
FunctionBodyHelper::Const<float>("Exponent", 2.0f),
FunctionBodyHelper::Const<float>("Epsilon", float(1e-9)),

Check warning on line 262 in onnx/defs/nn/old.cc

View workflow job for this annotation

GitHub Actions / Optional Lint

[cpplint] reported by reviewdog 🐶 Using deprecated casting style. Use static_cast<float>(...) instead [readability/casting] [4] Raw Output: onnx/defs/nn/old.cc:262: Using deprecated casting style. Use static_cast<float>(...) instead [readability/casting] [4]
{{"X_RM"}, "ReduceMean", {"X"}, {MakeRefAttribute("axes", AttributeProto::INTS)}},
{{"EX_squared"}, "Pow", {"X_RM", "Exponent"}},
{{"X_squared"}, "Pow", {"X", "Exponent"}},
{{"E_Xsquared"}, "ReduceMean", {"X_squared"}, {MakeRefAttribute("axes", AttributeProto::INTS)}},
{{"Variance"}, "Sub", {"E_Xsquared", "EX_squared"}},
{{"STD"}, "Sqrt", {"Variance"}},
{{"X_variance"}, "Sub", {"X", "X_RM"}},
{{"Processed_STD"}, "Add", {"STD", "Epsilon"}},
{{"Y"}, "Div", {"X_variance", "Processed_STD"}}}),
9));

const char* pads_doc2 =
"Padding for the beginning and ending along each spatial axis, it can take any value greater "
Expand Down Expand Up @@ -2546,5 +2548,6 @@

schema.BuildFunction(functionProto);
return true;
}));
},
18));
} // namespace ONNX_NAMESPACE