Enable UFMT on test_decomp.py, test_expanded_weights.py and some files #125117

Closed · wants to merge 3 commits
6 changes: 0 additions & 6 deletions .lintrunner.toml
@@ -1052,12 +1052,6 @@ exclude_patterns = [
'test/quantization/fx/test_quantize_fx.py',
'test/quantization/fx/test_subgraph_rewriter.py',
'test/test_datapipe.py',
'test/test_decomp.py',
'test/test_deploy.py',
'test/test_determination.py',
'test/test_dlpack.py',
'test/test_dynamic_shapes.py',
'test/test_expanded_weights.py',
'test/test_fake_tensor.py',
'test/test_flop_counter.py',
'test/test_function_schema.py',
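For context on the exclusion list above: UFMT, as wired up in PyTorch's .lintrunner.toml, runs usort (import sorting) followed by black (layout), and removing a path from exclude_patterns simply puts that file under the formatter. Below is a minimal sketch of the black half, run through its Python API; the sample source string and names are hypothetical and only illustrate the kind of rewrap seen in the file hunks that follow:

# Assumes black is installed; usort would additionally reorder imports first.
import black

sample = (
    "result = some_function(first_argument, second_argument, third_argument, "
    "fourth_argument, keyword_argument=True)\n"
)

# black wraps any statement longer than its default 88-character line length,
# which is exactly the mechanical change visible in the test files below.
print(black.format_str(sample, mode=black.Mode()))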
537 changes: 339 additions & 198 deletions test/test_decomp.py

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion test/test_deploy.py
@@ -3,9 +3,10 @@
import textwrap
import types

from torch.utils._freeze import Freezer, PATH_MARKER
from torch.testing._internal.common_utils import run_tests, TestCase

from torch.utils._freeze import Freezer, PATH_MARKER


class TestFreezer(TestCase):
"""Tests the freeze.py script"""
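The test_deploy.py hunk above is the usort half of UFMT at work: within an import block, modules are alphabetized (so torch.testing._internal.common_utils now precedes torch.utils._freeze) and the names inside a single import are sorted as well, which appears to be case-insensitive here (run_tests before TestCase). A tiny self-contained sketch of that ordering rule, using plain Python rather than usort itself:

# The sort order seen in the hunks: case-insensitive alphabetical.
# Prints ['run_tests', 'TestCase'], matching the rewritten import above.
names = ["TestCase", "run_tests"]
print(sorted(names, key=str.lower))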
17 changes: 10 additions & 7 deletions test/test_determination.py
@@ -3,7 +3,7 @@
import os

import run_test
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_utils import run_tests, TestCase


class DummyOptions:
@@ -30,7 +30,9 @@ def determined_tests(cls, changed_files):
return [
test
for test in cls.TESTS
if run_test.should_run_test(run_test.TARGET_DET_LIST, test, changed_files, DummyOptions())
if run_test.should_run_test(
run_test.TARGET_DET_LIST, test, changed_files, DummyOptions()
)
]

def test_target_det_list_is_sorted(self):
@@ -42,9 +44,7 @@ def test_target_det_list_is_sorted(self):

def test_config_change_only(self):
"""CI configs trigger all tests"""
self.assertEqual(
self.determined_tests([".ci/pytorch/test.sh"]), self.TESTS
)
self.assertEqual(self.determined_tests([".ci/pytorch/test.sh"]), self.TESTS)

def test_run_test(self):
"""run_test.py is imported by determination tests"""
@@ -68,14 +68,17 @@ def test_cpp_file(self):
def test_test_file(self):
"""Test files trigger themselves and dependent tests"""
self.assertEqual(
self.determined_tests(["test/test_jit.py"]), ["test_jit_profiling", "test_jit"]
self.determined_tests(["test/test_jit.py"]),
["test_jit_profiling", "test_jit"],
)
self.assertEqual(
self.determined_tests(["test/jit/test_custom_operators.py"]),
["test_jit_profiling", "test_jit"],
)
self.assertEqual(
self.determined_tests(["test/quantization/eager/test_quantize_eager_ptq.py"]),
self.determined_tests(
["test/quantization/eager/test_quantize_eager_ptq.py"]
),
["test_quantization"],
)

68 changes: 59 additions & 9 deletions test/test_dlpack.py
@@ -2,11 +2,16 @@

import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, IS_JETSON
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, dtypes, skipMeta, skipCUDAIfRocm,
onlyNativeDeviceTypes)
dtypes,
instantiate_device_type_tests,
onlyCUDA,
onlyNativeDeviceTypes,
skipCUDAIfRocm,
skipMeta,
)
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import IS_JETSON, run_tests, TestCase
from torch.utils.dlpack import from_dlpack, to_dlpack


@@ -15,15 +20,33 @@ class TestTorchDlPack(TestCase):

@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
@dtypes(
*all_types_and_complex_and(
torch.half,
torch.bfloat16,
torch.bool,
torch.uint16,
torch.uint32,
torch.uint64,
)
)
def test_dlpack_capsule_conversion(self, device, dtype):
x = make_tensor((5,), dtype=dtype, device=device)
z = from_dlpack(to_dlpack(x))
self.assertEqual(z, x)

@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
@dtypes(
*all_types_and_complex_and(
torch.half,
torch.bfloat16,
torch.bool,
torch.uint16,
torch.uint32,
torch.uint64,
)
)
def test_dlpack_protocol_conversion(self, device, dtype):
x = make_tensor((5,), dtype=dtype, device=device)
z = from_dlpack(x)
@@ -62,15 +85,33 @@ def test_dlpack_conversion_with_streams(self, device, dtype):

@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
@dtypes(
*all_types_and_complex_and(
torch.half,
torch.bfloat16,
torch.bool,
torch.uint16,
torch.uint32,
torch.uint64,
)
)
def test_from_dlpack(self, device, dtype):
x = make_tensor((5,), dtype=dtype, device=device)
y = torch.from_dlpack(x)
self.assertEqual(x, y)

@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
@dtypes(
*all_types_and_complex_and(
torch.half,
torch.bfloat16,
torch.bool,
torch.uint16,
torch.uint32,
torch.uint64,
)
)
def test_from_dlpack_noncontinguous(self, device, dtype):
x = make_tensor((25,), dtype=dtype, device=device).reshape(5, 5)

@@ -113,7 +154,16 @@ def test_dlpack_conversion_with_diff_streams(self, device, dtype):

@skipMeta
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
@dtypes(
*all_types_and_complex_and(
torch.half,
torch.bfloat16,
torch.bool,
torch.uint16,
torch.uint32,
torch.uint64,
)
)
def test_from_dlpack_dtype(self, device, dtype):
x = make_tensor((5,), dtype=dtype, device=device)
y = torch.from_dlpack(x)
@@ -204,5 +254,5 @@ def test_dlpack_normalize_strides(self):

instantiate_device_type_tests(TestTorchDlPack, globals())

if __name__ == '__main__':
if __name__ == "__main__":
run_tests()
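
For readers skimming the formatting-only hunks above, the behavior these tests exercise is the DLPack round trip; a minimal sketch assuming a CPU-only PyTorch install (values arbitrary, not taken from the test file):

# Two conversion paths covered by TestTorchDlPack.
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

x = torch.arange(5)

# Legacy capsule path: export an opaque DLPack capsule, then re-import it.
capsule = to_dlpack(x)
y = from_dlpack(capsule)

# Protocol path: torch.from_dlpack accepts any object implementing
# __dlpack__ / __dlpack_device__, including a torch.Tensor itself.
z = torch.from_dlpack(x)

# Both paths alias the original storage rather than copying it.
assert torch.equal(x, y) and torch.equal(x, z)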