Remove cloudpickle stuff
mariosasko committed Jul 26, 2022
1 parent 2674f0c commit c3bc52d
Showing 1 changed file with 1 addition and 43 deletions.
44 changes: 1 addition & 43 deletions src/datasets/utils/py_utils.py
@@ -21,15 +21,14 @@
 import functools
 import itertools
 import os
-import pickle
 import re
 import types
 from contextlib import contextmanager
 from io import BytesIO as StringIO
 from multiprocessing import Pool, RLock
 from shutil import disk_usage
 from types import CodeType, FunctionType
-from typing import Callable, ClassVar, Dict, Generic, List, Optional, Tuple, Union
+from typing import Dict, List, Optional, Tuple, Union
 from urllib.parse import urlparse
 
 import dill
@@ -559,47 +558,6 @@ def proxy(func):
     return proxy
 
 
-class _CloudPickleTypeHintFix:
-    """
-    Type hints can't be properly pickled in python < 3.7
-    CloudPickle provided a way to make it work in older versions.
-    This class provide utilities to fix pickling of type hints in older versions.
-    from https://github.com/cloudpipe/cloudpickle/pull/318/files
-    """
-
-    def _is_parametrized_type_hint(obj):
-        # This is very cheap but might generate false positives.
-        origin = getattr(obj, "__origin__", None)  # typing Constructs
-        values = getattr(obj, "__values__", None)  # typing_extensions.Literal
-        type_ = getattr(obj, "__type__", None)  # typing_extensions.Final
-        return origin is not None or values is not None or type_ is not None
-
-    def _create_parametrized_type_hint(origin, args):
-        return origin[args]
-
-    def _save_parametrized_type_hint(pickler, obj):
-        # The distorted type check sematic for typing construct becomes:
-        # ``type(obj) is type(TypeHint)``, which means "obj is a
-        # parametrized TypeHint"
-        if type(obj) is type(Literal):  # pragma: no branch
-            initargs = (Literal, obj.__values__)
-        elif type(obj) is type(Final):  # pragma: no branch
-            initargs = (Final, obj.__type__)
-        elif type(obj) is type(ClassVar):
-            initargs = (ClassVar, obj.__type__)
-        elif type(obj) in [type(Union), type(Tuple), type(Generic)]:
-            initargs = (obj.__origin__, obj.__args__)
-        elif type(obj) is type(Callable):
-            args = obj.__args__
-            if args[0] is Ellipsis:
-                initargs = (obj.__origin__, args)
-            else:
-                initargs = (obj.__origin__, (list(args[:-1]), args[-1]))
-        else:  # pragma: no cover
-            raise pickle.PicklingError(f"Datasets pickle Error: Unknown type {type(obj)}")
-        pickler.save_reduce(_CloudPickleTypeHintFix._create_parametrized_type_hint, initargs, obj=obj)
-
-
 @pklregister(CodeType)
 def _save_code(pickler, obj):
     """
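For reference, here is a minimal, self-contained sketch (illustration only, not part of this commit; the helper name is adapted) of the trick the removed `_CloudPickleTypeHintFix` helpers relied on: a parametrized type hint such as `Dict[str, int]` is detected through its `__origin__` attribute and rebuilt by re-subscribing the bare construct with the saved arguments.

```python
# Illustration only (not part of this commit): how the removed helpers detected
# and rebuilt parametrized type hints so they could be pickled on old Pythons.
from typing import Dict, List


def create_parametrized_type_hint(origin, args):
    # Re-subscribing the origin with the saved args rebuilds the hint,
    # e.g. List[int] or Dict[(str, int)] == Dict[str, int].
    return origin[args]


# Parametrized hints expose __origin__, which is what the cheap detection checked.
assert getattr(List[int], "__origin__", None) is not None

# Round-tripping the (origin, args) pair reproduces an equal type hint.
assert create_parametrized_type_hint(List, int) == List[int]
assert create_parametrized_type_hint(Dict, (str, int)) == Dict[str, int]
```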

1 comment on commit c3bc52d

@github-actions

Show benchmarks

PyArrow==6.0.0

Show updated benchmarks!

Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.007089 / 0.011353 (-0.004264) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.003596 / 0.011008 (-0.007413) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.028639 / 0.038508 (-0.009869) |
| read_batch_unformated after write_array2d | 0.031429 / 0.023109 (0.008320) |
| read_batch_unformated after write_flattened_sequence | 0.267863 / 0.275898 (-0.008035) |
| read_batch_unformated after write_nested_sequence | 0.334926 / 0.323480 (0.011446) |
| read_col_formatted_as_numpy after write_array2d | 0.006795 / 0.007986 (-0.001191) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.005005 / 0.004328 (0.000676) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.006961 / 0.004250 (0.002710) |
| read_col_unformated after write_array2d | 0.043929 / 0.037052 (0.006877) |
| read_col_unformated after write_flattened_sequence | 0.313708 / 0.258489 (0.055219) |
| read_col_unformated after write_nested_sequence | 0.339881 / 0.293841 (0.046040) |
| read_formatted_as_numpy after write_array2d | 0.029323 / 0.128546 (-0.099224) |
| read_formatted_as_numpy after write_flattened_sequence | 0.008803 / 0.075646 (-0.066843) |
| read_formatted_as_numpy after write_nested_sequence | 0.238512 / 0.419271 (-0.180759) |
| read_unformated after write_array2d | 0.048466 / 0.043533 (0.004933) |
| read_unformated after write_flattened_sequence | 0.287410 / 0.255139 (0.032271) |
| read_unformated after write_nested_sequence | 0.297179 / 0.283200 (0.013980) |
| write_array2d | 0.094440 / 0.141683 (-0.047243) |
| write_flattened_sequence | 1.351470 / 1.452155 (-0.100685) |
| write_nested_sequence | 1.393275 / 1.492716 (-0.099442) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.217297 / 0.018006 (0.199291) |
| get_batch_of_1024_rows | 0.458683 / 0.000490 (0.458193) |
| get_first_row | 0.004346 / 0.000200 (0.004146) |
| get_last_row | 0.000096 / 0.000054 (0.000041) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.025906 / 0.037411 (-0.011506) |
| shard | 0.105709 / 0.014526 (0.091183) |
| shuffle | 0.124358 / 0.176557 (-0.052198) |
| sort | 0.166526 / 0.737135 (-0.570609) |
| train_test_split | 0.125417 / 0.296338 (-0.170921) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.414602 / 0.215209 (0.199393) |
| read 50000 | 4.266561 / 2.077655 (2.188906) |
| read_batch 50000 10 | 1.697675 / 1.504120 (0.193555) |
| read_batch 50000 100 | 1.528030 / 1.541195 (-0.013165) |
| read_batch 50000 1000 | 1.660920 / 1.468490 (0.192430) |
| read_formatted numpy 5000 | 0.388680 / 4.584777 (-4.196097) |
| read_formatted pandas 5000 | 3.788501 / 3.745712 (0.042789) |
| read_formatted tensorflow 5000 | 1.961196 / 5.269862 (-3.308665) |
| read_formatted torch 5000 | 1.264402 / 4.565676 (-3.301275) |
| read_formatted_batch numpy 5000 10 | 0.049423 / 0.424275 (-0.374852) |
| read_formatted_batch numpy 5000 1000 | 0.011037 / 0.007607 (0.003430) |
| shuffled read 5000 | 0.491588 / 0.226044 (0.265544) |
| shuffled read 50000 | 4.838773 / 2.268929 (2.569845) |
| shuffled read_batch 50000 10 | 2.143503 / 55.444624 (-53.301121) |
| shuffled read_batch 50000 100 | 1.843096 / 6.876477 (-5.033381) |
| shuffled read_batch 50000 1000 | 1.934717 / 2.142072 (-0.207356) |
| shuffled read_formatted numpy 5000 | 0.487475 / 4.805227 (-4.317752) |
| shuffled read_formatted_batch numpy 5000 10 | 0.106405 / 6.500664 (-6.394259) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.057415 / 0.075469 (-0.018054) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.531867 / 1.841788 (-0.309921) |
| map fast-tokenizer batched | 13.290258 / 8.074308 (5.215950) |
| map identity | 24.192725 / 10.191392 (14.001333) |
| map identity batched | 0.920501 / 0.680424 (0.240077) |
| map no-op batched | 0.540107 / 0.534201 (0.005906) |
| map no-op batched numpy | 0.360259 / 0.579283 (-0.219025) |
| map no-op batched pandas | 0.413468 / 0.434364 (-0.020896) |
| map no-op batched pytorch | 0.272165 / 0.540337 (-0.268172) |
| map no-op batched tensorflow | 0.255449 / 1.386936 (-1.131487) |
PyArrow==latest
Show updated benchmarks!

Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.005535 / 0.011353 (-0.005818) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.003680 / 0.011008 (-0.007328) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.025802 / 0.038508 (-0.012706) |
| read_batch_unformated after write_array2d | 0.031140 / 0.023109 (0.008031) |
| read_batch_unformated after write_flattened_sequence | 0.283206 / 0.275898 (0.007308) |
| read_batch_unformated after write_nested_sequence | 0.348053 / 0.323480 (0.024573) |
| read_col_formatted_as_numpy after write_array2d | 0.003328 / 0.007986 (-0.004658) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.003474 / 0.004328 (-0.000855) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.004811 / 0.004250 (0.000560) |
| read_col_unformated after write_array2d | 0.043698 / 0.037052 (0.006645) |
| read_col_unformated after write_flattened_sequence | 0.287613 / 0.258489 (0.029124) |
| read_col_unformated after write_nested_sequence | 0.356187 / 0.293841 (0.062346) |
| read_formatted_as_numpy after write_array2d | 0.026699 / 0.128546 (-0.101847) |
| read_formatted_as_numpy after write_flattened_sequence | 0.009557 / 0.075646 (-0.066089) |
| read_formatted_as_numpy after write_nested_sequence | 0.236475 / 0.419271 (-0.182796) |
| read_unformated after write_array2d | 0.052716 / 0.043533 (0.009183) |
| read_unformated after write_flattened_sequence | 0.276286 / 0.255139 (0.021147) |
| read_unformated after write_nested_sequence | 0.322170 / 0.283200 (0.038971) |
| write_array2d | 0.098760 / 0.141683 (-0.042923) |
| write_flattened_sequence | 1.383421 / 1.452155 (-0.068734) |
| write_nested_sequence | 1.366956 / 1.492716 (-0.125760) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.216595 / 0.018006 (0.198589) |
| get_batch_of_1024_rows | 0.466947 / 0.000490 (0.466458) |
| get_first_row | 0.006202 / 0.000200 (0.006003) |
| get_last_row | 0.000097 / 0.000054 (0.000043) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.024058 / 0.037411 (-0.013353) |
| shard | 0.110444 / 0.014526 (0.095918) |
| shuffle | 0.129062 / 0.176557 (-0.047495) |
| sort | 0.166490 / 0.737135 (-0.570645) |
| train_test_split | 0.129114 / 0.296338 (-0.167224) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.379305 / 0.215209 (0.164096) |
| read 50000 | 3.736623 / 2.077655 (1.658968) |
| read_batch 50000 10 | 1.879756 / 1.504120 (0.375636) |
| read_batch 50000 100 | 1.681379 / 1.541195 (0.140184) |
| read_batch 50000 1000 | 1.723940 / 1.468490 (0.255450) |
| read_formatted numpy 5000 | 0.392819 / 4.584777 (-4.191958) |
| read_formatted pandas 5000 | 3.878044 / 3.745712 (0.132332) |
| read_formatted tensorflow 5000 | 1.904026 / 5.269862 (-3.365836) |
| read_formatted torch 5000 | 1.222630 / 4.565676 (-3.343046) |
| read_formatted_batch numpy 5000 10 | 0.053931 / 0.424275 (-0.370344) |
| read_formatted_batch numpy 5000 1000 | 0.009845 / 0.007607 (0.002237) |
| shuffled read 5000 | 0.484091 / 0.226044 (0.258047) |
| shuffled read 50000 | 4.669559 / 2.268929 (2.400630) |
| shuffled read_batch 50000 10 | 2.126025 / 55.444624 (-53.318599) |
| shuffled read_batch 50000 100 | 1.822032 / 6.876477 (-5.054445) |
| shuffled read_batch 50000 1000 | 1.880968 / 2.142072 (-0.261104) |
| shuffled read_formatted numpy 5000 | 0.488812 / 4.805227 (-4.316415) |
| shuffled read_formatted_batch numpy 5000 10 | 0.111257 / 6.500664 (-6.389407) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.062037 / 0.075469 (-0.013432) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.633184 / 1.841788 (-0.208604) |
| map fast-tokenizer batched | 14.001179 / 8.074308 (5.926871) |
| map identity | 23.358149 / 10.191392 (13.166757) |
| map identity batched | 0.877401 / 0.680424 (0.196977) |
| map no-op batched | 0.527681 / 0.534201 (-0.006520) |
| map no-op batched numpy | 0.363734 / 0.579283 (-0.215549) |
| map no-op batched pandas | 0.442177 / 0.434364 (0.007813) |
| map no-op batched pytorch | 0.263494 / 0.540337 (-0.276843) |
| map no-op batched tensorflow | 0.284656 / 1.386936 (-1.102280) |
