[AutoAccept][Codemod][Replace deprecated unittest asserts] fbcode//faim/mmf

Differential Revision: D51702249

fbshipit-source-id: 90df3f5a87cdad64aa4c6f365607ee5ceff5c5dd
generatedunixname2443911735787003 authored and facebook-github-bot committed Nov 30, 2023
1 parent e005267 commit 6074484
Showing 2 changed files with 8 additions and 8 deletions.
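
For context: assertEquals and assertAlmostEquals are long-deprecated aliases of assertEqual and assertAlmostEqual (deprecated since Python 3.2, removed in Python 3.12), which is why the codemod rewrites them. A minimal standalone sketch of the preferred spellings (a hypothetical test, not part of this commit):

import unittest

class AssertAliasExample(unittest.TestCase):
    def test_preferred_asserts(self):
        # assertEqual replaces the deprecated assertEquals alias
        self.assertEqual([1, 2, 3], [1, 2, 3])
        # assertAlmostEqual replaces the deprecated assertAlmostEquals;
        # the third positional argument is the number of decimal places
        self.assertAlmostEqual(0.1 + 0.2, 0.3, 7)

if __name__ == "__main__":
    unittest.main()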
tests/models/test_uniter.py (1 addition, 1 deletion)
@@ -34,7 +34,7 @@ def test_forward(self):
         output = embedding(
             self.img_feat, self.img_pos_feat, self.type_embeddings, img_masks=None
         )
-        self.assertEquals(list(output.shape), [32, 100, 256])
+        self.assertEqual(list(output.shape), [32, 100, 256])
 
 
 class TestUNITERModelBase(unittest.TestCase):
tests/trainers/lightning/test_checkpoint.py (7 additions, 7 deletions)
@@ -53,7 +53,7 @@ def _assert_same_dict(self, mmf, lightning, same=True):
     def _assert_same(self, obj1, obj2, same=True):
         if same:
             if hasattr(obj1, "mean") and obj1.dtype == torch.float:
-                self.assertAlmostEquals(obj1.mean().item(), obj2.mean().item(), 2)
+                self.assertAlmostEqual(obj1.mean().item(), obj2.mean().item(), 2)
             elif hasattr(obj1, "item"):
                 self.assertEqual(obj1.item(), obj2.item())
             elif type(obj1) is dict and type(obj2) is dict:
@@ -328,11 +328,11 @@ def test_load_mmf_trainer_checkpoint_in_lightning(self):
             lightning.trainer.fit(
                 lightning.model, train_dataloaders=lightning.train_loader
             )
-            self.assertEquals(lightning.trainer.global_step, 6)
+            self.assertEqual(lightning.trainer.global_step, 6)
             call_args_list = mock_method.call_args_list
             # training will take place 0 times. Since max_steps is the same
             # as the checkpoint's global_step
-            self.assertEquals(len(call_args_list), 0)
+            self.assertEqual(len(call_args_list), 0)
 
             # check to make sure that the lightning trainer's model and
             # mmf's are the same
@@ -393,7 +393,7 @@ def test_load_trainer_ckpt_number_of_steps(self):
             lightning.trainer.fit(
                 lightning.model, train_dataloaders=lightning.train_loader
             )
-            self.assertEquals(lightning.trainer.global_step, 12)
+            self.assertEqual(lightning.trainer.global_step, 12)
             call_args_list = [l[0][4] for l in mock_method.call_args_list]
             # in lightning 1.6.0 last batch idx from ckpt is repeated
             self.assertListEqual(list(range(5, 11)), call_args_list)
@@ -455,7 +455,7 @@ def test_lightning_checkpoint_interval(self):
             # https://github.com/PyTorchLightning/pytorch-lightning/pull/6997
             # also was an issue according to test_validation.py
             files = os.listdir(os.path.join(tmp_d, "models"))
-            self.assertEquals(3, len(files))
+            self.assertEqual(3, len(files))
             indexes = {int(x[:-5].split("=")[1]) for x in files}
             self.assertSetEqual({2, 4, 6}, indexes)

@@ -511,8 +511,8 @@ def _load_checkpoint_and_test(self, filename, ckpt_config=None):
 
         # Make sure lightning and mmf parity
         self._assert_same_dict(mmf_ckpt["model"], lightning_ckpt["state_dict"])
-        self.assertEquals(mmf_ckpt["current_epoch"], lightning_ckpt["epoch"] + 1)
-        self.assertEquals(mmf_ckpt["num_updates"], lightning_ckpt["global_step"])
+        self.assertEqual(mmf_ckpt["current_epoch"], lightning_ckpt["epoch"] + 1)
+        self.assertEqual(mmf_ckpt["num_updates"], lightning_ckpt["global_step"])
         self._assert_same_dict(
             mmf_ckpt["optimizer"], lightning_ckpt["optimizer_states"][0]
         )
