[Pylint] Making hexagon tests pylint compliant Part 1 of N #12082

Merged: 3 commits, merged on Jul 19, 2022 (changes shown from all commits).
8 changes: 8 additions & 0 deletions tests/lint/pylint.sh
@@ -24,3 +24,11 @@ python3 -m pylint tests/python/contrib/test_cmsisnn --rcfile="$(dirname "$0")"/p
python3 -m pylint tests/python/relay/aot/*.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/ci --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/integration/ --rcfile="$(dirname "$0")"/pylintrc

# tests/python/contrib/test_hexagon tests
python3 -m pylint tests/python/contrib/test_hexagon/benchmark_util.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/contrib/test_hexagon/conftest.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/contrib/test_hexagon/conv2d/test_conv2d_blocked.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/contrib/test_hexagon/conv2d/test_conv2d_conv2d.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/contrib/test_hexagon/infrastructure.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/contrib/test_hexagon/test_2d_physical_buffers.py --rcfile="$(dirname "$0")"/pylintrc
113 changes: 51 additions & 62 deletions tests/python/contrib/test_hexagon/benchmark_util.py
@@ -15,11 +15,13 @@
# specific language governing permissions and limitations
# under the License.

""" Utility functions used for benchmarks """

import csv
import os
import pytest
import tempfile
import collections

import pytest


def skip_bencharks_flag_and_reason():
@@ -37,22 +39,8 @@ def skip_bencharks_flag_and_reason():

if asn == "simulator":
return (True, "Skipping benchmarks when ANDROID_SERIAL_NUMBER='simluator'")
else:
return (False, "")


class UnsupportedException(Exception):
"""
Indicates that the specified benchmarking configuration is known to
currently be unsupported. The Exception message may provide more detail.
"""


class NumericalAccuracyException(Exception):
"""
Indicates that the benchmarking configuration appeared to run successfully,
but the output data didn't have the expected accuracy.
"""
return (False, "")
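
A hypothetical usage sketch (not part of this diff): the (flag, reason) pair returned by skip_bencharks_flag_and_reason() is shaped so it can be handed straight to pytest's skipif marker; the import path and test name below are invented for illustration.

    import pytest

    from .benchmark_util import skip_bencharks_flag_and_reason  # import path assumed

    SHOULD_SKIP_BENCHMARKS, SKIP_REASON = skip_bencharks_flag_and_reason()

    @pytest.mark.skipif(SHOULD_SKIP_BENCHMARKS, reason=SKIP_REASON)
    def test_some_benchmark():
        ...  # benchmark body omitted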


class UnsupportedException(Exception):
@@ -183,9 +171,10 @@ def print_csv(self, f, column_name_order, timing_decimal_places=3):
]:
if col_name in csv_line_dict:
old_value = csv_line_dict[col_name]
assert isinstance(
old_value, float
), f"Formatting code assumes that column {col_name} is some col_nameind of float, but its actual type is {type(old_value)}"
assert isinstance(old_value, float), (
f"Formatting code assumes that column {col_name} is"
f" some col_nameind of float, but its actual type is {type(old_value)}"
)
str_value = f"{old_value:>0.{timing_decimal_places}f}"
csv_line_dict[col_name] = str_value
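
For illustration only (the value is invented): with the default timing_decimal_places=3, the formatting expression above renders a float timing column like this.

    timing_decimal_places = 3
    old_value = 12.345678            # pretend timing value from a CSV row
    str_value = f"{old_value:>0.{timing_decimal_places}f}"
    assert str_value == "12.346"     # rounded to three decimal places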

@@ -204,16 +193,16 @@ def get_benchmark_id(keys_dict):
Note that the insertion order for `keys_dict` affects the computed name.
"""
# Creat a copy, because we might be modifying it.
d = dict(keys_dict)
keys_dict_copy = dict(keys_dict)

# Sniff for shape-like lists, because we want them in a form that's both
# readable and filesystem-friendly...
for k, v in d.items():
if isinstance(v, list) or isinstance(v, tuple):
v2 = "_".join([str(x) for x in v])
d[k] = v2
for k, v in keys_dict_copy.items():
if isinstance(v, (list, tuple)):
v_str = "_".join([str(x) for x in v])
keys_dict_copy[k] = v_str

return "-".join([f"{k}:{v}" for k, v in d.items()])
return "-".join([f"{k}:{v}" for k, v in keys_dict_copy.items()])


def get_benchmark_decription(keys_dict):
@@ -226,44 +215,44 @@ def get_benchmark_decription(keys_dict):
return " ".join([f"{k}={v}" for k, v in keys_dict.items()])


# This fixture provides some initialization / finalization logic for groups of related
# benchmark runs.
# See the fixture implementation below for details.
#
# The fixture's mechanics are described here: https://stackoverflow.com/a/63047695
#
# TODO: There may be cleaner ways to let each class that uses this fixture provide its
# own value for `csv_column_order`.
#
# TODO: In the future we may wish to break this fixture up in to several smaller ones.
#
# The overall contract for a class (e.g. `MyTest`) using this fixture is as follows:
#
# https://stackoverflow.com/a/63047695
#
# @pytest.mark.usefixtures("benchmark_group")
# class MyTest:
#
# # The fixture requires that this class variable is defined before
# # the fixture's finalizer-logic executes.
# #
# # This is used as an argument to BenchmarkTable.print_csv(...) after
# # all of MyTest's unit tests have executed.
# csv_column_order = [
# ...
# ]
#
# # Before the MyTest's first unit test executes, the fixture will populate the
# # following class variables:
# MyTest.working_dir : str
# MyTest.benchmark_table : BenchmarkTable
@pytest.fixture(scope="class")
def benchmark_group(request):
"""This fixture provides some initialization / finalization logic for groups of related
benchmark runs.
See the fixture implementation below for details.

The fixture's mechanics are described here: https://stackoverflow.com/a/63047695

TODO: There may be cleaner ways to let each class that uses this fixture provide its
own value for `csv_column_order`.

TODO: In the future we may wish to break this fixture up in to several smaller ones.

The overall contract for a class (e.g. `MyTest`) using this fixture is as follows:

https://stackoverflow.com/a/63047695

@pytest.mark.usefixtures("benchmark_group")
class MyTest:

# The fixture requires that this class variable is defined before
# the fixture's finalizer-logic executes.
#
# This is used as an argument to BenchmarkTable.print_csv(...) after
# all of MyTest's unit tests have executed.
csv_column_order = [
...
]

# Before the MyTest's first unit test executes, the fixture will populate the
# following class variables:
MyTest.working_dir : str
MyTest.benchmark_table : BenchmarkTable"""
working_dir = tempfile.mkdtemp()
bt = BenchmarksTable()
table = BenchmarksTable()

request.cls.working_dir = working_dir
request.cls.benchmark_table = bt
request.cls.benchmark_table = table

yield

@@ -272,14 +261,14 @@ def benchmark_group(request):
if not hasattr(request.cls, "csv_column_order"):
raise Exception('Classes using this fixture must have a member named "csv_column_order"')

with open(tabular_output_filename, "w") as csv_file:
bt.print_csv(csv_file, request.cls.csv_column_order)
with open(tabular_output_filename, "w", encoding="UTF-8") as csv_file:
table.print_csv(csv_file, request.cls.csv_column_order)

print()
print("*" * 80)
print(f"BENCHMARK RESULTS FILE: {tabular_output_filename}")
print("*" * 80)
print()

if bt.has_fail() > 0:
if table.has_fail() > 0:
pytest.fail("At least one benchmark configuration failed", pytrace=False)
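
A hypothetical sketch of a test class wired to the benchmark_group fixture, following the contract spelled out in its docstring; the class name and column names are invented for illustration.

    import pytest

    @pytest.mark.usefixtures("benchmark_group")
    class TestMyBenchmarks:
        # Read by the fixture's finalizer when it calls print_csv(...) on the table.
        csv_column_order = ["status", "timings_min_usecs", "timings_median_usecs"]

        def test_one_config(self):
            # Both attributes are populated by the fixture before this test runs.
            assert self.working_dir
            table = self.benchmark_table  # BenchmarksTable; rows would be recorded here
            ...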
8 changes: 2 additions & 6 deletions tests/python/contrib/test_hexagon/conftest.py
@@ -18,12 +18,8 @@
""" Hexagon testing fixtures used to deduce testing argument
values from testing parameters """


import pytest

import tvm
import tvm.testing

# Disabling invalid-name check as the name is expected to be exactly this by pytest
# pylint: disable=invalid-name
pytest_plugins = [
"tvm.contrib.hexagon.pytest_plugin",
]
98 changes: 55 additions & 43 deletions tests/python/contrib/test_hexagon/conv2d/test_conv2d_blocked.py
@@ -15,13 +15,13 @@
# specific language governing permissions and limitations
# under the License.

import sys
""" Hexagon contrib tests for blocked conv2d """

import platform

import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm import te, topi
from tvm.topi import testing

from ..infrastructure import (
@@ -33,9 +33,6 @@
get_packed_shape,
)

import numpy as np
import pytest


def conv2d_nhwc8h8w32c(
shape_input,
@@ -57,72 +54,84 @@ def conv2d_nhwc8h8w32c(
"""

# nhwc layout
X = te.placeholder(shape_input, dtype=dtype, name="logical_input")
logical_input = te.placeholder(shape_input, dtype=dtype, name="logical_input")

# oihw8i32o4i layout
filt_packed = te.placeholder(shape_filter, dtype=dtype, name="packed_filter")

block_H, block_W, block_C = get_block_shape()
block_h, block_w, block_c = get_block_shape()

# Calculate padded input
N, H, W, C = shape_input
pad_h = (block_H - ((H + pad[1]) % block_H)) % block_H
pad_w = (block_W - ((W + pad[3]) % block_W)) % block_W
X_pad = topi.nn.pad(
X, [0, pad[0], pad[2], 0], [0, pad_h, pad_w, 0], pad_value=0, name="padded_input"
_, height, width, _ = shape_input
pad_h = (block_h - ((height + pad[1]) % block_h)) % block_h
pad_w = (block_w - ((width + pad[3]) % block_w)) % block_w
padded_input = topi.nn.pad(
logical_input,
[0, pad[0], pad[2], 0],
[0, pad_h, pad_w, 0],
pad_value=0,
name="padded_input",
)

# Calculate packed input
packed_shape = get_packed_shape(X_pad.shape)
X_packed = te.compute(
packed_shape = get_packed_shape(padded_input.shape)
packed_input = te.compute(
packed_shape,
lambda n, ho, wo, co, hi, wi, ci: X_pad[
n, ho * block_H + hi, wo * block_W + wi, co * block_C + ci
lambda n, ho, wo, co, hi, wi, ci: padded_input[
n, ho * block_h + hi, wo * block_w + wi, co * block_c + ci
],
name="packed_input",
)

output_shape, compute = conv2d_compute(X_packed, filt_packed, pad, stride, dilation)
Y = te.compute(output_shape, compute, name="packed_output")
s = te.create_schedule(Y.op)
output_shape, compute = conv2d_compute(packed_input, filt_packed, pad, stride, dilation)
packed_output = te.compute(output_shape, compute, name="packed_output")
s = te.create_schedule(packed_output.op)

# Ensure the padding and array packing is performed inline
s[X_pad].compute_inline()
s[X_packed].compute_inline()
s[padded_input].compute_inline()
s[packed_input].compute_inline()

# cache reads and writes
Xl = s.cache_read(X_packed, storage_scope, [Y])
Fl = s.cache_read(filt_packed, storage_scope, [Y])
Yl = s.cache_write(Y, storage_scope)
cached_input = s.cache_read(packed_input, storage_scope, [packed_output])
cached_filt = s.cache_read(filt_packed, storage_scope, [packed_output])
cached_output = s.cache_write(packed_output, storage_scope)

# cache write schedule
n, ho, wo, ko, hi, wi, ki = s[Y].op.axis
koo, koi = s[Y].split(ko, factor=k_split_factor)
hoo, hoi = s[Y].split(ho, factor=h_split_factor)
s[Y].reorder(n, koo, hoo, koi, hoi, wo, hi, wi, ki)
s[Yl].compute_at(s[Y], hoo)
batch, h_outer, w_outer, k_outer, h_inner, w_inner, k_inner = s[packed_output].op.axis
koo, koi = s[packed_output].split(k_outer, factor=k_split_factor)
hoo, hoi = s[packed_output].split(h_outer, factor=h_split_factor)
s[packed_output].reorder(batch, koo, hoo, koi, hoi, w_outer, h_inner, w_inner, k_inner)
s[cached_output].compute_at(s[packed_output], hoo)

# compute schedule
n, ho, wo, ko, hi, wi, ki = s[Yl].op.axis
rh, rw, rc = s[Yl].op.reduce_axis
rco, rci = s[Yl].split(rc, factor=block_C)
koo, koi = s[Yl].split(ko, factor=k_split_factor)
hoo, hoi = s[Yl].split(ho, factor=h_split_factor)
s[Yl].reorder(n, koo, hoo, koi, hoi, wo, rco, hi, wi, ki, rci)
s[Xl].compute_at(s[Yl], hoo)
s[Fl].compute_at(s[Yl], hoo)
batch, h_outer, w_outer, k_outer, h_inner, w_inner, k_inner = s[cached_output].op.axis
_, _, reduce_c = s[cached_output].op.reduce_axis
rco, rci = s[cached_output].split(reduce_c, factor=block_c)
koo, koi = s[cached_output].split(k_outer, factor=k_split_factor)
hoo, hoi = s[cached_output].split(h_outer, factor=h_split_factor)
s[cached_output].reorder(
batch, koo, hoo, koi, hoi, w_outer, rco, h_inner, w_inner, k_inner, rci
)
s[cached_input].compute_at(s[cached_output], hoo)
s[cached_filt].compute_at(s[cached_output], hoo)

binds = {}
if storage_scope and storage_scope != "global":
with tvm.transform.PassContext():
Xb = tvm.tir.decl_buffer(packed_shape, name="Xb", dtype=dtype, scope=storage_scope)
Yb = tvm.tir.decl_buffer(output_shape, name="Yb", dtype=dtype, scope=storage_scope)
binds = {X: Xb, Y: Yb}
input_buffer = tvm.tir.decl_buffer(
packed_shape, name="Xb", dtype=dtype, scope=storage_scope
)
output_buffer = tvm.tir.decl_buffer(
output_shape, name="Yb", dtype=dtype, scope=storage_scope
)
binds = {logical_input: input_buffer, packed_output: output_buffer}

return (s, [X, filt_packed, Y], binds)
return (s, [logical_input, filt_packed, packed_output], binds)
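
A small worked example of the padding arithmetic above (the numbers are arbitrary and only for illustration; the 8x8 spatial blocking follows the nhwc8h8w32c layout name):

    block_h, block_w = 8, 8
    height, width = 64, 64
    pad = (1, 1, 1, 1)               # padding tuple as passed to conv2d_nhwc8h8w32c

    pad_h = (block_h - ((height + pad[1]) % block_h)) % block_h   # (8 - 65 % 8) % 8 == 7
    pad_w = (block_w - ((width + pad[3]) % block_w)) % block_w    # (8 - 65 % 8) % 8 == 7

    # Together with the existing padding, the spatial extent becomes a whole
    # number of 8x8 blocks, which is what the packed 8h8w32c layout requires.
    assert (height + pad[1] + pad_h) % block_h == 0
    assert (width + pad[3] + pad_w) % block_w == 0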


class BaseConv2d:
"""Base class for conv2d tests"""

# input
batch = tvm.testing.parameter(1)
in_size = tvm.testing.parameter(64)
@@ -139,6 +148,8 @@ class BaseConv2d:


class TestConv2dPackedFilter(BaseConv2d):
"""Conv2d packed filter test class"""

@tvm.testing.parametrize_targets("llvm")
@tvm.testing.skip_if_32bit(reason="Test known to be flaky on i386 machines")
def test_conv2d(
@@ -155,6 +166,7 @@ def test_conv2d(
dtype,
target,
):
"""conv2d test"""
# TODO: no support for dilation
dilation = 1
