Skip to content

Commit

Permalink
Disable test
Browse files — browse the repository at this point in the history
  • Loading branch information
mariosasko committed Jul 26, 2022
1 parent 59a1e3d commit 9c2b16c
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 9 deletions.
2 changes: 1 addition & 1 deletion setup.py
Expand Up @@ -88,7 +88,7 @@
"huggingface-hub>=0.1.0,<1.0.0",
# Utilities from PyPA to e.g., compare versions
"packaging",
"responses==0.16",
"responses<0.19",
]

AUDIO_REQUIRE = [
Expand Down
8 changes: 4 additions & 4 deletions tests/test_arrow_dataset.py
Expand Up @@ -3117,10 +3117,10 @@ def test_pickle_dataset_after_transforming_the_table(in_memory, method_and_param
assert dataset._data.table == reloaded_dataset._data.table


# @pytest.mark.skipif(
# os.name in ["nt", "posix"] and (os.getenv("CIRCLECI") == "true" or os.getenv("GITHUB_ACTIONS") == "true"),
# reason='On Windows CircleCI or GitHub Actions, it raises botocore.exceptions.EndpointConnectionError: Could not connect to the endpoint URL: "http://127.0.0.1:5555/test"',
# ) # TODO: find what's wrong with CircleCI / GitHub Actions
@pytest.mark.skipif(
os.name in ["nt", "posix"] and (os.getenv("CIRCLECI") == "true" or os.getenv("GITHUB_ACTIONS") == "true"),
reason='On Windows CircleCI or GitHub Actions, it raises botocore.exceptions.EndpointConnectionError: Could not connect to the endpoint URL: "http://127.0.0.1:5555/test"',
) # TODO: find what's wrong with CircleCI / GitHub Actions
@require_s3
def test_dummy_dataset_serialize_s3(s3, dataset, s3_test_bucket_name):
mock_bucket = s3_test_bucket_name
Expand Down
8 changes: 4 additions & 4 deletions tests/test_dataset_dict.py
Expand Up @@ -663,10 +663,10 @@ def test_datasetdict_from_text_split(split, text_path, tmp_path):
assert all(dataset[split].split == split for split in path.keys())


# @pytest.mark.skipif(
# os.name in ["nt", "posix"] and (os.getenv("CIRCLECI") == "true" or os.getenv("GITHUB_ACTIONS") == "true"),
# reason='On Windows CircleCI or GitHub Actions, it raises botocore.exceptions.EndpointConnectionError: Could not connect to the endpoint URL: "http://127.0.0.1:5555/test"',
# ) # TODO: find what's wrong with CircleCI / GitHub Actions
@pytest.mark.skipif(
os.name in ["nt", "posix"] and (os.getenv("CIRCLECI") == "true" or os.getenv("GITHUB_ACTIONS") == "true"),
reason='On Windows CircleCI or GitHub Actions, it raises botocore.exceptions.EndpointConnectionError: Could not connect to the endpoint URL: "http://127.0.0.1:5555/test"',
) # TODO: find what's wrong with CircleCI / GitHub Actions
@require_s3
def test_dummy_dataset_serialize_s3(s3, dataset, s3_test_bucket_name):
dsets = DatasetDict({"train": dataset, "test": dataset.select(range(2))})
Expand Down

1 comment on commit 9c2b16c

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Show benchmarks

PyArrow==6.0.0

Show updated benchmarks!

Benchmark: benchmark_array_xd.json

metric read_batch_formatted_as_numpy after write_array2d read_batch_formatted_as_numpy after write_flattened_sequence read_batch_formatted_as_numpy after write_nested_sequence read_batch_unformated after write_array2d read_batch_unformated after write_flattened_sequence read_batch_unformated after write_nested_sequence read_col_formatted_as_numpy after write_array2d read_col_formatted_as_numpy after write_flattened_sequence read_col_formatted_as_numpy after write_nested_sequence read_col_unformated after write_array2d read_col_unformated after write_flattened_sequence read_col_unformated after write_nested_sequence read_formatted_as_numpy after write_array2d read_formatted_as_numpy after write_flattened_sequence read_formatted_as_numpy after write_nested_sequence read_unformated after write_array2d read_unformated after write_flattened_sequence read_unformated after write_nested_sequence write_array2d write_flattened_sequence write_nested_sequence
new / old (diff) 0.009690 / 0.011353 (-0.001663) 0.004207 / 0.011008 (-0.006801) 0.039926 / 0.038508 (0.001418) 0.034331 / 0.023109 (0.011222) 0.362115 / 0.275898 (0.086217) 0.425061 / 0.323480 (0.101581) 0.006835 / 0.007986 (-0.001150) 0.003978 / 0.004328 (-0.000351) 0.009986 / 0.004250 (0.005735) 0.056758 / 0.037052 (0.019706) 0.362974 / 0.258489 (0.104485) 0.425516 / 0.293841 (0.131675) 0.045544 / 0.128546 (-0.083003) 0.018744 / 0.075646 (-0.056902) 0.318114 / 0.419271 (-0.101158) 0.058078 / 0.043533 (0.014545) 0.366462 / 0.255139 (0.111323) 0.391829 / 0.283200 (0.108629) 0.103058 / 0.141683 (-0.038625) 1.758440 / 1.452155 (0.306285) 1.762509 / 1.492716 (0.269792)

Benchmark: benchmark_getitem_100B.json

metric get_batch_of_1024_random_rows get_batch_of_1024_rows get_first_row get_last_row
new / old (diff) 0.222774 / 0.018006 (0.204768) 0.512684 / 0.000490 (0.512195) 0.007878 / 0.000200 (0.007678) 0.000113 / 0.000054 (0.000058)

Benchmark: benchmark_indices_mapping.json

metric select shard shuffle sort train_test_split
new / old (diff) 0.027353 / 0.037411 (-0.010059) 0.106508 / 0.014526 (0.091982) 0.129038 / 0.176557 (-0.047519) 0.201143 / 0.737135 (-0.535992) 0.126131 / 0.296338 (-0.170207)

Benchmark: benchmark_iterating.json

metric read 5000 read 50000 read_batch 50000 10 read_batch 50000 100 read_batch 50000 1000 read_formatted numpy 5000 read_formatted pandas 5000 read_formatted tensorflow 5000 read_formatted torch 5000 read_formatted_batch numpy 5000 10 read_formatted_batch numpy 5000 1000 shuffled read 5000 shuffled read 50000 shuffled read_batch 50000 10 shuffled read_batch 50000 100 shuffled read_batch 50000 1000 shuffled read_formatted numpy 5000 shuffled read_formatted_batch numpy 5000 10 shuffled read_formatted_batch numpy 5000 1000
new / old (diff) 0.599315 / 0.215209 (0.384105) 6.117380 / 2.077655 (4.039726) 2.415226 / 1.504120 (0.911106) 1.978494 / 1.541195 (0.437299) 1.958880 / 1.468490 (0.490390) 0.695052 / 4.584777 (-3.889725) 5.304511 / 3.745712 (1.558799) 2.931329 / 5.269862 (-2.338533) 2.036663 / 4.565676 (-2.529014) 0.081108 / 0.424275 (-0.343167) 0.012406 / 0.007607 (0.004799) 0.759997 / 0.226044 (0.533952) 7.309396 / 2.268929 (5.040467) 3.054882 / 55.444624 (-52.389742) 2.336991 / 6.876477 (-4.539485) 2.469305 / 2.142072 (0.327233) 0.919285 / 4.805227 (-3.885942) 0.190070 / 6.500664 (-6.310594) 0.076320 / 0.075469 (0.000851)

Benchmark: benchmark_map_filter.json

metric filter map fast-tokenizer batched map identity map identity batched map no-op batched map no-op batched numpy map no-op batched pandas map no-op batched pytorch map no-op batched tensorflow
new / old (diff) 1.882604 / 1.841788 (0.040816) 15.867815 / 8.074308 (7.793507) 41.335581 / 10.191392 (31.144189) 1.126477 / 0.680424 (0.446054) 0.698075 / 0.534201 (0.163874) 0.477070 / 0.579283 (-0.102213) 0.604211 / 0.434364 (0.169847) 0.349219 / 0.540337 (-0.191118) 0.375039 / 1.386936 (-1.011897)
PyArrow==latest
Show updated benchmarks!

Benchmark: benchmark_array_xd.json

metric read_batch_formatted_as_numpy after write_array2d read_batch_formatted_as_numpy after write_flattened_sequence read_batch_formatted_as_numpy after write_nested_sequence read_batch_unformated after write_array2d read_batch_unformated after write_flattened_sequence read_batch_unformated after write_nested_sequence read_col_formatted_as_numpy after write_array2d read_col_formatted_as_numpy after write_flattened_sequence read_col_formatted_as_numpy after write_nested_sequence read_col_unformated after write_array2d read_col_unformated after write_flattened_sequence read_col_unformated after write_nested_sequence read_formatted_as_numpy after write_array2d read_formatted_as_numpy after write_flattened_sequence read_formatted_as_numpy after write_nested_sequence read_unformated after write_array2d read_unformated after write_flattened_sequence read_unformated after write_nested_sequence write_array2d write_flattened_sequence write_nested_sequence
new / old (diff) 0.008707 / 0.011353 (-0.002646) 0.004680 / 0.011008 (-0.006328) 0.031912 / 0.038508 (-0.006596) 0.034947 / 0.023109 (0.011837) 0.334173 / 0.275898 (0.058275) 0.410018 / 0.323480 (0.086538) 0.004966 / 0.007986 (-0.003020) 0.003941 / 0.004328 (-0.000387) 0.005237 / 0.004250 (0.000987) 0.053457 / 0.037052 (0.016405) 0.357028 / 0.258489 (0.098539) 0.402926 / 0.293841 (0.109085) 0.044824 / 0.128546 (-0.083723) 0.014419 / 0.075646 (-0.061228) 0.294580 / 0.419271 (-0.124691) 0.063752 / 0.043533 (0.020219) 0.353615 / 0.255139 (0.098476) 0.416992 / 0.283200 (0.133792) 0.121462 / 0.141683 (-0.020221) 1.694349 / 1.452155 (0.242195) 1.761394 / 1.492716 (0.268678)

Benchmark: benchmark_getitem_100B.json

metric get_batch_of_1024_random_rows get_batch_of_1024_rows get_first_row get_last_row
new / old (diff) 0.223399 / 0.018006 (0.205393) 0.501345 / 0.000490 (0.500855) 0.005869 / 0.000200 (0.005669) 0.000117 / 0.000054 (0.000063)

Benchmark: benchmark_indices_mapping.json

metric select shard shuffle sort train_test_split
new / old (diff) 0.029066 / 0.037411 (-0.008345) 0.117905 / 0.014526 (0.103379) 0.145252 / 0.176557 (-0.031304) 0.194774 / 0.737135 (-0.542361) 0.146062 / 0.296338 (-0.150277)

Benchmark: benchmark_iterating.json

metric read 5000 read 50000 read_batch 50000 10 read_batch 50000 100 read_batch 50000 1000 read_formatted numpy 5000 read_formatted pandas 5000 read_formatted tensorflow 5000 read_formatted torch 5000 read_formatted_batch numpy 5000 10 read_formatted_batch numpy 5000 1000 shuffled read 5000 shuffled read 50000 shuffled read_batch 50000 10 shuffled read_batch 50000 100 shuffled read_batch 50000 1000 shuffled read_formatted numpy 5000 shuffled read_formatted_batch numpy 5000 10 shuffled read_formatted_batch numpy 5000 1000
new / old (diff) 0.599106 / 0.215209 (0.383897) 5.864123 / 2.077655 (3.786468) 2.377134 / 1.504120 (0.873014) 1.972553 / 1.541195 (0.431358) 2.110647 / 1.468490 (0.642157) 0.745097 / 4.584777 (-3.839680) 5.388034 / 3.745712 (1.642322) 5.232735 / 5.269862 (-0.037126) 2.637616 / 4.565676 (-1.928061) 0.085102 / 0.424275 (-0.339173) 0.012719 / 0.007607 (0.005112) 0.785539 / 0.226044 (0.559495) 7.566647 / 2.268929 (5.297719) 2.972130 / 55.444624 (-52.472494) 2.431408 / 6.876477 (-4.445068) 2.482340 / 2.142072 (0.340268) 0.926603 / 4.805227 (-3.878624) 0.184751 / 6.500664 (-6.315913) 0.071523 / 0.075469 (-0.003946)

Benchmark: benchmark_map_filter.json

metric filter map fast-tokenizer batched map identity map identity batched map no-op batched map no-op batched numpy map no-op batched pandas map no-op batched pytorch map no-op batched tensorflow
new / old (diff) 1.846559 / 1.841788 (0.004771) 16.107328 / 8.074308 (8.033020) 44.362254 / 10.191392 (34.170862) 1.157688 / 0.680424 (0.477264) 0.727389 / 0.534201 (0.193188) 0.514964 / 0.579283 (-0.064319) 0.624466 / 0.434364 (0.190102) 0.358502 / 0.540337 (-0.181835) 0.358447 / 1.386936 (-1.028489)

CML watermark

Please sign in to comment.