Skip to content

Commit

Permalink
[ETHOSN] Improve inferring new shape of the Reshape operator (#12594)
Browse files Browse the repository at this point in the history
Fixes the case when the new shape of a reshape has more than 4 dimensions. While such a reshape cannot be offloaded to the NPU, the check previously produced an error that prevented further compilation. The correct behavior is for the check to return False so that the reshape is simply not offloaded.
  • Loading branch information
NicolaLancellotti committed Aug 31, 2022
1 parent c2824a8 commit acbbd9f
Show file tree
Hide file tree
Showing 4 changed files with 33 additions and 16 deletions.
2 changes: 0 additions & 2 deletions python/tvm/relay/op/contrib/ethosn.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,8 +360,6 @@ def reshape(expr):
"""Check if a reshape is supported by Ethos-N."""
if not ethosn_available():
return False
if not _is_ethosn_composite(expr.args[0]):
return False

return _ethosn.reshape(expr)

Expand Down
18 changes: 9 additions & 9 deletions src/relay/backend/contrib/ethosn/ethosn_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
#include <utility>
#include <vector>

#include "../../../op/tensor/transform.h"
#include "ethosn_support_library/Support.hpp"
#include "ethosn_support_library/SupportQueries.hpp"
#include "tvm/relay/qnn/attrs.h"
Expand Down Expand Up @@ -300,15 +299,16 @@ EthosnError EthosnAPI::Reshape(const Expr& expr, ReshapeParams* params) {
sl::DataType input_data_type;
EthosnError err = Tvm2Npu(input_dtype->shape, &input_tensor_shape);
err += Tvm2Npu(input_dtype->dtype, &input_data_type);
int tensor_size = 1;
for (const auto& dim : input_tensor_shape) {
tensor_size *= dim;
}

Array<IndexExpr> inferred_shape = {1, 1, 1, 1};
Array<IndexExpr> new_shape = InferNewShape(input_dtype->shape, reshape->attrs, false);
for (size_t i = 0; i < new_shape.size(); ++i) {
inferred_shape.Set(i, new_shape[i]);
Array<IndexExpr> inferred_shape;
Array<IndexExpr> new_shape = reshape->checked_type().as<TensorTypeNode>()->shape;
if (new_shape.size() < 4) {
inferred_shape = {1, 1, 1, 1};
for (size_t i = 0; i < new_shape.size(); ++i) {
inferred_shape.Set(i, new_shape[i]);
}
} else {
inferred_shape = new_shape;
}

err += Tvm2Npu(inferred_shape, &params->new_shape);
Expand Down
4 changes: 2 additions & 2 deletions tests/python/contrib/test_ethosn/test_networks.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def test_resnet_50_int8():
input_dict={"input": (1, 224, 224, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=11,
host_ops=10,
npu_partitions=2,
)

Expand Down Expand Up @@ -211,6 +211,6 @@ def test_ssd_mobilenet_v1():
input_dict={"normalized_input_image_tensor": (1, 300, 300, 3)},
compile_hash=_compile_hash,
output_count=4,
host_ops=28,
host_ops=27,
npu_partitions=2,
)
25 changes: 22 additions & 3 deletions tests/python/contrib/test_ethosn/test_reshape.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,8 @@
def _get_model(input_shape, output_shape, dtype):
"""Return a model and any parameters it may have"""
a = relay.var("a", shape=input_shape, dtype=dtype)
conv, params = tei.get_conv2d(a, input_shape, dtype)
req = relay.reshape(conv, output_shape)
return req, params
req = relay.reshape(a, output_shape)
return req, {}


@requires_ethosn
Expand All @@ -53,6 +52,8 @@ def _get_model(input_shape, output_shape, dtype):
],
)
def test_reshape(dtype, input_shape, output_shape):
"""Compare Reshape output with TVM."""

np.random.seed(0)
inputs = {
"a": tvm.nd.array(
Expand All @@ -71,3 +72,21 @@ def test_reshape(dtype, input_shape, output_shape):
outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))

tei.verify(outputs, dtype, 1)


@requires_ethosn
@pytest.mark.parametrize(
    "input_shape, output_shape",
    [
        (
            (1, 13, 13, 255),
            (1, 13, 13, 3, 85),
        ),
    ],
)
def test_reshape_failure(input_shape, output_shape):
    """Check that a >4-dimensional Reshape is not offloaded to the NPU.

    The Ethos-N NPU supports at most 4 dimensions, so a reshape whose new
    shape has 5 dimensions must be left on the host instead of raising an
    error during partitioning.
    """
    model, params = _get_model(input_shape, output_shape, "int8")
    mod = tei.make_module(model, params)
    # The whole model is a single host op (the reshape) and nothing is
    # partitioned for the NPU.
    tei.build(mod, params, expected_host_ops=1, npu_partitions=0)

0 comments on commit acbbd9f

Please sign in to comment.