Skip to content

Commit

Permalink
[1.13.1rc2] More cherry picks for fixing function shape inference (#4886)
Browse files · Browse the repository at this point in the history

* Fix attribute substitution within subgraphs during function type/shape inference (#4792)

* Fix attribute substitution within subgraphs in function inference

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Delete commented out dead code

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Address PR feedback

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Fix lint issue

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>
Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>

* Handle variants of constant op in shape inference (#4824)

* Fix attribute substitution within subgraphs in function inference

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Delete commented out dead code

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Address PR feedback

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Fix lint issue

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Handle constant op variants

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Fix whitespace

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Add test case

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Run lintrunner

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

---------

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>
Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>

* Fix parser bug in handling non-tensor types (#4863)

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>
Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>

* Fix function shape inference bug (#4880)

* Fix function shape inference bug

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

* Fix lintrunner issues

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>

---------

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>
Co-authored-by: Chun-Wei Chen <jacky82226@gmail.com>
Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>

* bump as 1.13.1rc2

Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>

* freeze ORT 1.13.1 to unblock failure

Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>

* do not verify 3.11 ORT

Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>

---------

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>
Signed-off-by: Chun-Wei Chen <jacky82226@gmail.com>
Co-authored-by: G. Ramalingam <grama@microsoft.com>
  • Loading branch information
jcwchen and gramalingam committed Feb 13, 2023
1 parent 59a48e7 commit e192ba0
Show file tree
Hide file tree
Showing 6 changed files with 334 additions and 21 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/release_mac.yml
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ jobs:
- name: Verify ONNX with ONNX Runtime PyPI package
if: matrix.python-version != '3.11'
run: |
python -m pip install -q onnxruntime
python -m pip install -q onnxruntime==1.13.1
python onnx/test/test_with_ort.py
# Only triggered by weekly event on certain CI
Expand Down
2 changes: 1 addition & 1 deletion VERSION_NUMBER
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.13.1rc1
1.13.1rc2
12 changes: 11 additions & 1 deletion onnx/defs/parser.cc
Original file line number Diff line number Diff line change
Expand Up @@ -369,7 +369,17 @@ Status OnnxParser::Parse(TensorProto& tensorProto, const TypeProto& tensorTypePr
// Returns true if the next token begins a type.
// A type is either a primitive type name (float, int64, ...) or one of the
// composite type constructors (seq, map, optional, sparse_tensor), which are
// keywords rather than primitive type names and so must be checked separately.
// NOTE(review): the original hunk retained the pre-diff unconditional
// `return (PrimitiveTypeNameMap::IsTypeName(id));`, which made the keyword
// check below unreachable; it is removed here.
bool OnnxParser::NextIsType() {
  std::string id("");
  (void)PeekIdentifier(id); // peek only: does not consume the token
  if (PrimitiveTypeNameMap::IsTypeName(id))
    return true;
  switch (KeyWordMap::Lookup(id)) {
    case KeyWordMap::KeyWord::SEQ_TYPE:
    case KeyWordMap::KeyWord::MAP_TYPE:
    case KeyWordMap::KeyWord::OPTIONAL_TYPE:
    case KeyWordMap::KeyWord::SPARSE_TENSOR_TYPE:
      return true;
    default:
      return false;
  }
}

Status OnnxParser::ParseSingleAttributeValue(AttributeProto& attr) {
Expand Down
104 changes: 86 additions & 18 deletions onnx/shape_inference/implementation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -284,21 +284,56 @@ class ShapeInferenceImplBase {
}
}

// Materialize `vector` as a tensor owned by this instance
// (input_data_by_name_holder) and register a pointer to it under `name` so
// later nodes can consume the value during inference.
template <typename T>
void addTemporaryConstant(const std::string& name, const T& vector) {
  auto& owned = input_data_by_name_holder[name];
  owned = ToTensor(vector);
  input_data_by_name[name] = &owned;
}

// Record the output of a Constant node so its value is available for
// data-propagation during shape inference. Handles every Constant variant:
// `value` (tensor), sparse tensor values, and the scalar/list convenience
// attributes (value_int/value_ints/value_float/value_floats), which are
// materialized as temporary tensors.
// NOTE(review): the scraped hunk interleaved removed pre-diff lines (duplicate
// assignments and the old `value_ints` branch) with the added code; this body
// is the coherent post-change version.
void preprocess(const NodeProto& n) {
  if (checker::check_is_experimental_op(n)) {
    has_experimental_op = true;
  } else if (n.op_type() == "Constant" && n.output().size() == 1) {
    const std::string& output_name = n.output(0);
    for (const auto& attr : n.attribute()) {
      if (attr.name() == "value") {
        if (attr.type() == AttributeProto::TENSOR && attr.has_t()) {
          if (reuse_constant_tensors) {
            // Graph inference: safe to alias the attribute's tensor directly.
            input_data_by_name[output_name] = &attr.t();
          } else {
            // Function inference: the node is a temporary copy, so keep an
            // owned copy of the tensor instead of a pointer into it.
            input_data_by_name_holder[output_name] = attr.t();
            input_data_by_name[output_name] = &input_data_by_name_holder[output_name];
          }
        } else if (attr.type() == AttributeProto::SPARSE_TENSOR && attr.has_sparse_tensor()) {
          if (reuse_constant_tensors) {
            input_sparse_data_by_name[output_name] = &attr.sparse_tensor();
          }
        }
      } else {
        // Scalar/list variants of Constant: convert to a temporary tensor.
        switch (attr.type()) {
          case AttributeProto::INTS: {
            std::vector<int64_t> ints{attr.ints().begin(), attr.ints().end()};
            addTemporaryConstant(output_name, ints);
            break;
          }
          case AttributeProto::INT: {
            std::vector<int64_t> ints({attr.i()});
            addTemporaryConstant(output_name, ints);
            break;
          }
          case AttributeProto::FLOATS: {
            std::vector<float> floats{attr.floats().begin(), attr.floats().end()};
            addTemporaryConstant(output_name, floats);
            break;
          }
          case AttributeProto::FLOAT: {
            std::vector<float> floats({attr.f()});
            addTemporaryConstant(output_name, floats);
            break;
          }
          default:
            break;
        }
      }
    }
  }
}
Expand Down Expand Up @@ -486,25 +521,52 @@ class ShapeInferenceImplBase {
}
}

void process(const NodeProto& n, std::unordered_map<std::string, const AttributeProto*> attr_map) {
NodeProto copy_n(n);
// Add attribute information into the temporary node
copy_n.clear_attribute();
for (const auto& attr : n.attribute()) {
if (attr.has_ref_attr_name()) {
if (attr_map.count(attr.ref_attr_name())) {
auto copy_attr = *attr_map[attr.ref_attr_name()];
copy_attr.set_name(attr.name());
copy_n.add_attribute()->CopyFrom(copy_attr);
void replaceAttrRefs(NodeProto& n, std::unordered_map<std::string, const AttributeProto*> attr_map) {
auto& attributes = *n.mutable_attribute();
for (auto attr_iter = attributes.begin(); attr_iter != attributes.end();) {
auto& attr = *attr_iter;
if (!attr.ref_attr_name().empty()) {
// Attribute-references must be replaced by the corresponding attribute-value in the call-node
// if the call-node contains the attribute. Otherwise, this attribute must be removed.
auto entry = attr_map.find(attr.ref_attr_name());
if (entry != attr_map.cend()) {
// Copy value of attribute, but retain original name:
std::string name = attr.name();
attr = *(entry->second);
attr.set_name(name);
} else {
attr_iter = attributes.erase(attr_iter);
continue;
}
} else {
copy_n.add_attribute()->CopyFrom(attr);
}
// Subgraphs must be recursively processed.
if (attr.has_g()) {
replaceAttrRefs(*attr.mutable_g(), attr_map);
}
for (auto& graph : *attr.mutable_graphs()) {
replaceAttrRefs(graph, attr_map);
}
++attr_iter;
}
}

void replaceAttrRefs(GraphProto& graph, std::unordered_map<std::string, const AttributeProto*> attr_map) {
for (auto& n : *graph.mutable_node()) {
replaceAttrRefs(n, attr_map);
}
}

void process(const NodeProto& n, std::unordered_map<std::string, const AttributeProto*> attr_map) {
NodeProto copy_n(n);
replaceAttrRefs(copy_n, attr_map);
process(copy_n);
}

void process(const FunctionProto& func_proto, InferenceContext& ctx) {
// Ensure Constant node tensor-attributes are copied
bool old_reuse_constant_tensors = reuse_constant_tensors;
reuse_constant_tensors = false;

// Get a temporary tensor-shape map
const auto num_func_inputs = func_proto.input_size();
std::vector<TypeProto> types_cache(num_func_inputs);
Expand Down Expand Up @@ -548,6 +610,8 @@ class ShapeInferenceImplBase {
type_proto->CopyFrom(*(iter->second));
}
}

reuse_constant_tensors = old_reuse_constant_tensors;
}

public:
Expand Down Expand Up @@ -609,6 +673,10 @@ class ShapeInferenceImplBase {
std::vector<std::string> inference_errors;

std::list<TypeProto> initializer_type_list;

// reuse_constant_tensors: controls whether we need to copy tensors occurring as attributes
// in Constant nodes. We avoid it for inference for graphs, but must make a copy for functions.
bool reuse_constant_tensors = true;
};

static void InferShapesImpl(
Expand Down
28 changes: 28 additions & 0 deletions onnx/test/cpp/parser_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -490,5 +490,33 @@ square (x) => (y) {
CheckModel(code);
}

// The parser must accept a graph input declared with a seq(...) type.
TEST(ParserTest, TypesModelTest1) {
  const char* model_text = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 18 ]
>
agraph (seq(float[N]) seqX) => (float[M, N] X)
{
X = ConcatFromSequence < axis = 0, new_axis = 1 >(seqX)
}
)ONNX";
  CheckModel(model_text);
}

// The parser must accept all non-tensor input types in one signature:
// seq, map, optional, and sparse_tensor alongside a plain tensor.
TEST(ParserTest, TypesModelTest2) {
  const char* model_text = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 18 ]
>
agraph (float[N] tensorX, seq(float[N]) seqX, map(int32, float[N]) mapX, optional(float[N]) optionalX, sparse_tensor(float[N]) sparseX) => (float[N] X)
{
X = Identity (tensorX)
}
)ONNX";
  CheckModel(model_text);
}

} // namespace Test
} // namespace ONNX_NAMESPACE

0 comments on commit e192ba0

Please sign in to comment.