Skip to content

Commit

Permalink
Remove
Browse files Browse the repository at this point in the history
  • Loading branch information
amyeroberts committed Oct 10, 2022
1 parent 0d2852c commit 5fa46be
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 14 deletions.
6 changes: 3 additions & 3 deletions src/transformers/models/whisper/modeling_tf_whisper.py
Expand Up @@ -1030,7 +1030,7 @@ def call(
>>> from transformers import TFWhisperModel, WhisperFeatureExtractor
>>> from datasets import load_dataset
- >>> model = TFWhisperModel.from_pretrained("openai/whisper-base", from_pt=True)
+ >>> model = TFWhisperModel.from_pretrained("openai/whisper-base")
>>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(
Expand Down Expand Up @@ -1157,7 +1157,7 @@ def call(
>>> from transformers import TFWhisperModel, WhisperFeatureExtractor
>>> from datasets import load_dataset
- >>> model = TFWhisperModel.from_pretrained("openai/whisper-base", from_pt=True)
+ >>> model = TFWhisperModel.from_pretrained("openai/whisper-base")
>>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(
Expand Down Expand Up @@ -1281,7 +1281,7 @@ def call(
>>> from datasets import load_dataset
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
- >>> model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
+ >>> model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
Expand Down
22 changes: 11 additions & 11 deletions tests/models/whisper/test_modeling_tf_whisper.py
Expand Up @@ -644,7 +644,7 @@ def _load_datasamples(self, num_samples):
@slow
def test_tiny_logits_librispeech(self):
set_seed(0)
- model = TFWhisperModel.from_pretrained("openai/whisper-tiny", from_pt=True)
+ model = TFWhisperModel.from_pretrained("openai/whisper-tiny")
input_speech = self._load_datasamples(1)
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="tf").input_features
Expand Down Expand Up @@ -687,7 +687,7 @@ def test_tiny_logits_librispeech(self):
@slow
def test_small_en_logits_librispeech(self):
set_seed(0)
- model = TFWhisperModel.from_pretrained("openai/whisper-small.en", from_pt=True)
+ model = TFWhisperModel.from_pretrained("openai/whisper-small.en")

input_speech = self._load_datasamples(1)

Expand Down Expand Up @@ -721,7 +721,7 @@ def test_small_en_logits_librispeech(self):
def test_large_logits_librispeech(self):
set_seed(0)

- model = TFWhisperModel.from_pretrained("openai/whisper-large", from_pt=True)
+ model = TFWhisperModel.from_pretrained("openai/whisper-large")

input_speech = self._load_datasamples(1)

Expand Down Expand Up @@ -757,7 +757,7 @@ def test_large_logits_librispeech(self):
def test_tiny_en_generation(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
model.config.decoder_start_token_id = 50257

input_speech = self._load_datasamples(1)
Expand All @@ -776,7 +776,7 @@ def test_tiny_en_generation(self):
def test_tiny_generation(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

input_speech = self._load_datasamples(1)
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features
Expand All @@ -794,7 +794,7 @@ def test_tiny_generation(self):
def test_tiny_xla_generation(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

input_speech = self._load_datasamples(1)
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features
Expand All @@ -818,7 +818,7 @@ def test_tiny_xla_generation(self):
def test_large_generation(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-large")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

input_speech = self._load_datasamples(1)
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features
Expand All @@ -837,7 +837,7 @@ def test_large_generation(self):
def test_large_generation_multilingual(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-large")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

ds = load_dataset("common_voice", "ja", split="test", streaming=True)
ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
Expand Down Expand Up @@ -872,7 +872,7 @@ def test_large_generation_multilingual(self):
def test_large_batched_generation(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-large")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

input_speech = self._load_datasamples(4)
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features
Expand Down Expand Up @@ -907,7 +907,7 @@ def test_large_batched_generation(self):
def test_tiny_en_batched_generation(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")

input_speech = self._load_datasamples(4)
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features
Expand Down Expand Up @@ -943,7 +943,7 @@ def test_tiny_en_batched_generation(self):
def test_tiny_en_batched_xla_generation(self):
set_seed(0)
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
- model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
+ model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")

input_speech = self._load_datasamples(4)
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features
Expand Down

0 comments on commit 5fa46be

Please sign in to comment.