From ab1c10a15eb75a99666c45e60fd2d943e5d98dfe Mon Sep 17 00:00:00 2001
From: daspartho
Date: Sat, 8 Oct 2022 00:30:28 +0530
Subject: [PATCH] wrap forward passes with torch.no_grad()

---
 tests/models/tapas/test_modeling_tapas.py | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/tests/models/tapas/test_modeling_tapas.py b/tests/models/tapas/test_modeling_tapas.py
index b7b4af6e5a2ad..271a5efc96163 100644
--- a/tests/models/tapas/test_modeling_tapas.py
+++ b/tests/models/tapas/test_modeling_tapas.py
@@ -570,7 +570,8 @@ def test_inference_no_head(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the sequence output
         expected_slice = torch.tensor(
             [
@@ -608,7 +609,8 @@ def test_inference_question_answering_head_conversational(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((1, 21))
@@ -657,7 +659,8 @@ def test_inference_question_answering_head_conversational_absolute_embeddings(se
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((1, 21))
@@ -705,7 +708,8 @@ def test_inference_question_answering_head_weak_supervision(self):
         inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
         inputs_on_device = {k: v.to(torch_device) for k, v in inputs.items()}
 
-        outputs = model(**inputs_on_device)
+        with torch.no_grad():
+            outputs = model(**inputs_on_device)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((2, 28))
@@ -774,15 +778,16 @@ def test_training_question_answering_head_weak_supervision(self):
         float_answer = torch.FloatTensor(float_answer).to(torch_device)
 
         # forward pass to get loss + logits:
-        outputs = model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            labels=labels,
-            numeric_values=numeric_values,
-            numeric_values_scale=numeric_values_scale,
-            float_answer=float_answer,
-        )
+        with torch.no_grad():
+            outputs = model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                token_type_ids=token_type_ids,
+                labels=labels,
+                numeric_values=numeric_values,
+                numeric_values_scale=numeric_values_scale,
+                float_answer=float_answer,
+            )
 
         # test the loss
         loss = outputs.loss
@@ -829,7 +834,8 @@ def test_inference_question_answering_head_strong_supervision(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((1, 21))
@@ -884,7 +890,8 @@ def test_inference_classification_head(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
 
         # test the classification logits
         logits = outputs.logits
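Note: text placed after the diff is ignored by git am, so the following is commentary only, not part of the applied patch. Every hunk above applies the same PyTorch idiom, shown in isolation below as a minimal sketch; the toy nn.Linear model and tensor shapes are illustrative assumptions, not taken from the TAPAS test suite.

import torch
from torch import nn

model = nn.Linear(4, 2)
model.eval()  # inference behavior for modules such as dropout/batchnorm

x = torch.randn(1, 4)

# Without the context manager, autograd records the forward pass and the
# output carries a grad_fn; in inference-only tests that graph is never
# consumed and only costs memory and time.
with torch.no_grad():
    outputs = model(x)

assert outputs.requires_grad is False  # no autograd graph was built

On PyTorch 1.9 and later, torch.inference_mode() is a stricter and slightly faster alternative to torch.no_grad() for the same purpose, at the cost that tensors created inside it cannot later be used in autograd.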