Wrap TAPAS integration test forward passes with torch.no_grad() #19416

Merged: 1 commit, merged on Oct 10, 2022
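The change applies one pattern throughout the TAPAS integration tests: these forward passes only need the model outputs, not gradients, so each call is wrapped in torch.no_grad(). Below is a minimal, self-contained sketch of that pattern (not taken from this PR); it uses a stand-in nn.Linear model rather than a pretrained TAPAS checkpoint.

import torch
from torch import nn

# Stand-in model; in the real tests this is a pretrained TAPAS model moved to torch_device.
model = nn.Linear(4, 2)
model.eval()
inputs = torch.randn(1, 4)

# Disabling gradient tracking for an inference-only forward pass skips building
# the autograd graph, which reduces memory use and speeds the test up.
with torch.no_grad():
    outputs = model(inputs)

# No graph was recorded, so the result is detached from autograd.
assert outputs.requires_grad is False

For inference-only tests this is purely an optimization: the numerical outputs are unchanged, only the autograd bookkeeping is skipped.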
37 changes: 22 additions & 15 deletions tests/models/tapas/test_modeling_tapas.py
@@ -570,7 +570,8 @@ def test_inference_no_head(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the sequence output
         expected_slice = torch.tensor(
             [
@@ -608,7 +609,8 @@ def test_inference_question_answering_head_conversational(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((1, 21))
@@ -657,7 +659,8 @@ def test_inference_question_answering_head_conversational_absolute_embeddings(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((1, 21))
@@ -705,7 +708,8 @@ def test_inference_question_answering_head_weak_supervision(self):
         inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
         inputs_on_device = {k: v.to(torch_device) for k, v in inputs.items()}

-        outputs = model(**inputs_on_device)
+        with torch.no_grad():
+            outputs = model(**inputs_on_device)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((2, 28))
@@ -774,15 +778,16 @@ def test_training_question_answering_head_weak_supervision(self):
         float_answer = torch.FloatTensor(float_answer).to(torch_device)

         # forward pass to get loss + logits:
-        outputs = model(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            labels=labels,
-            numeric_values=numeric_values,
-            numeric_values_scale=numeric_values_scale,
-            float_answer=float_answer,
-        )
+        with torch.no_grad():
+            outputs = model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                token_type_ids=token_type_ids,
+                labels=labels,
+                numeric_values=numeric_values,
+                numeric_values_scale=numeric_values_scale,
+                float_answer=float_answer,
+            )

         # test the loss
         loss = outputs.loss
@@ -829,7 +834,8 @@ def test_inference_question_answering_head_strong_supervision(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)
         # test the logits
         logits = outputs.logits
         expected_shape = torch.Size((1, 21))
@@ -884,7 +890,8 @@ def test_inference_classification_head(self):
         table, queries = prepare_tapas_single_inputs_for_inference()
         inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
         inputs = {k: v.to(torch_device) for k, v in inputs.items()}
-        outputs = model(**inputs)
+        with torch.no_grad():
+            outputs = model(**inputs)

         # test the classification logits
         logits = outputs.logits