Support hfh 0.10 implicit auth (#5031)
* support hfh 0.10 implicit auth

* update tests

* Bump minimum hfh to 0.2.0 and test minimum version

* style

* fix test

* fix tests

* again

* lucain's comment

* fix ci
lhoestq committed Sep 30, 2022
1 parent 1ea4d09 commit 365884d
Showing 9 changed files with 143 additions and 50 deletions.
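In short, the commit defers token resolution to `huggingface_hub` wherever possible, so that with `huggingface_hub>=0.10` the token saved by `huggingface-cli login` is picked up implicitly. A minimal sketch of what this means for users (the repository name below is hypothetical):

```python
from datasets import load_dataset

# Explicitly opting in to the saved token keeps working:
ds = load_dataset("me/my-private-dataset", use_auth_token=True)

# With huggingface_hub>=0.10, the default use_auth_token=None now also picks up
# the token saved by `huggingface-cli login`, so this works for private repos too:
ds = load_dataset("me/my-private-dataset")
```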
14 changes: 7 additions & 7 deletions .github/workflows/ci.yml
@@ -38,7 +38,7 @@ jobs:
       matrix:
         test: ['unit', 'integration']
         os: [ubuntu-latest, windows-latest]
-        pyarrow_version: [latest, 6.0.1]
+        deps_versions: [latest, minimum]
     continue-on-error: ${{ matrix.test == 'integration' }}
     runs-on: ${{ matrix.os }}
     steps:
@@ -63,12 +63,12 @@ jobs:
         run: |
           pip install .[tests]
           pip install -r additional-tests-requirements.txt --no-deps
-      - name: Install latest PyArrow
-        if: ${{ matrix.pyarrow_version == 'latest' }}
-        run: pip install pyarrow --upgrade
-      - name: Install PyArrow ${{ matrix.pyarrow_version }}
-        if: ${{ matrix.pyarrow_version != 'latest' }}
-        run: pip install pyarrow==${{ matrix.pyarrow_version }}
+      - name: Install dependencies (latest versions)
+        if: ${{ matrix.deps_versions == 'latest' }}
+        run: pip install --upgrade pyarrow huggingface-hub
+      - name: Install dependencies (minimum versions)
+        if: ${{ matrix.deps_versions != 'latest' }}
+        run: pip install pyarrow==6.0.1 huggingface-hub==0.2.0 transformers
       - name: Test with pytest
         run: |
           python -m pytest -rfExX -m ${{ matrix.test }} -n 2 --dist loadfile -sv ./tests/
3 changes: 2 additions & 1 deletion setup.py
@@ -89,7 +89,8 @@
     # for data streaming via http
     "aiohttp",
     # To get datasets from the Datasets Hub on huggingface.co
-    "huggingface-hub>=0.1.0,<1.0.0",
+    # minimum 0.2.0 for set_access_token
+    "huggingface-hub>=0.2.0,<1.0.0",
     # Utilities from PyPA to e.g., compare versions
     "packaging",
     "responses<0.19",
3 changes: 2 additions & 1 deletion src/datasets/arrow_dataset.py
@@ -102,6 +102,7 @@
 from .tasks import TaskTemplate
 from .utils import logging
 from .utils._hf_hub_fixes import create_repo
+from .utils._hf_hub_fixes import list_repo_files as hf_api_list_repo_files
 from .utils.file_utils import _retry, cached_path, estimate_dataset_size, hf_hub_url
 from .utils.info_utils import is_small_dataset
 from .utils.py_utils import asdict, convert_file_size_to_int, unique_values
@@ -4288,7 +4289,7 @@ def shards_with_embedded_external_files(shards):

         shards = shards_with_embedded_external_files(shards)

-        files = api.list_repo_files(repo_id, repo_type="dataset", revision=branch, token=token)
+        files = hf_api_list_repo_files(api, repo_id, repo_type="dataset", revision=branch, token=token)
         data_files = [file for file in files if file.startswith("data/")]

         def path_in_repo(_index, shard):
21 changes: 8 additions & 13 deletions src/datasets/load.py
@@ -29,7 +29,7 @@

 import fsspec
 import requests
-from huggingface_hub import HfApi, HfFolder
+from huggingface_hub import HfApi

 from . import config
 from .arrow_dataset import Dataset
@@ -62,6 +62,7 @@
 )
 from .splits import Split
 from .tasks import TaskTemplate
+from .utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
 from .utils.deprecation_utils import deprecated
 from .utils.file_utils import (
     OfflineModeIsEnabled,
@@ -736,14 +737,11 @@ def __init__(
         increase_load_count(name, resource_type="dataset")

     def get_module(self) -> DatasetModule:
-        if isinstance(self.download_config.use_auth_token, bool):
-            token = HfFolder.get_token() if self.download_config.use_auth_token else None
-        else:
-            token = self.download_config.use_auth_token
-        hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
+        hfh_dataset_info = hf_api_dataset_info(
+            HfApi(config.HF_ENDPOINT),
             self.name,
             revision=self.revision,
-            token=token if token else "no-token",
+            use_auth_token=self.download_config.use_auth_token,
             timeout=100.0,
         )
         patterns = (
@@ -1104,14 +1102,11 @@ def dataset_module_factory(
         _raise_if_offline_mode_is_enabled()
         hf_api = HfApi(config.HF_ENDPOINT)
         try:
-            if isinstance(download_config.use_auth_token, bool):
-                token = HfFolder.get_token() if download_config.use_auth_token else None
-            else:
-                token = download_config.use_auth_token
-            dataset_info = hf_api.dataset_info(
+            dataset_info = hf_api_dataset_info(
+                hf_api,
                 repo_id=path,
                 revision=revision,
-                token=token if token else "no-token",
+                use_auth_token=download_config.use_auth_token,
                 timeout=100.0,
             )
         except Exception as e:  # noqa: catch any exception of hf_hub and consider that the dataset doesn't exist
79 changes: 77 additions & 2 deletions src/datasets/utils/_hf_hub_fixes.py
@@ -1,7 +1,8 @@
-from typing import Optional
+from typing import List, Optional, Union

 import huggingface_hub
-from huggingface_hub import HfApi
+from huggingface_hub import HfApi, HfFolder
+from huggingface_hub.hf_api import DatasetInfo
 from packaging import version


@@ -99,3 +100,77 @@ def delete_repo(
             token=token,
             repo_type=repo_type,
         )
+
+
+def dataset_info(
+    hf_api: HfApi,
+    repo_id: str,
+    *,
+    revision: Optional[str] = None,
+    timeout: Optional[float] = None,
+    use_auth_token: Optional[Union[bool, str]] = None,
+) -> DatasetInfo:
+    """
+    The huggingface_hub.HfApi.dataset_info parameters changed in 0.10.0 and some of them were deprecated.
+    This function checks the huggingface_hub version to call the right parameters.
+    Args:
+        hf_api (`huggingface_hub.HfApi`): Hub client
+        repo_id (`str`):
+            A namespace (user or an organization) and a repo name separated
+            by a `/`.
+        revision (`str`, *optional*):
+            The revision of the dataset repository from which to get the
+            information.
+        timeout (`float`, *optional*):
+            Timeout in seconds for the request to the Hub.
+        use_auth_token (`bool` or `str`, *optional*):
+            Whether to use the `auth_token` provided from the
+            `huggingface_hub` cli. If not logged in, a valid `auth_token`
+            can be passed in as a string.
+    Returns:
+        [`hf_api.DatasetInfo`]: The dataset repository information.
+    <Tip>
+    Raises the following errors:
+        - [`~utils.RepositoryNotFoundError`]
+          If the repository to download from cannot be found. This may be because it doesn't exist,
+          or because it is set to `private` and you do not have access.
+        - [`~utils.RevisionNotFoundError`]
+          If the revision to download from cannot be found.
+    </Tip>
+    """
+    if version.parse(huggingface_hub.__version__) < version.parse("0.10.0"):
+        if use_auth_token is False:
+            token = "no-token"
+        elif isinstance(use_auth_token, str):
+            token = use_auth_token
+        else:
+            token = HfFolder.get_token() or "no-token"
+        return hf_api.dataset_info(
+            repo_id,
+            revision=revision,
+            token=token,
+            timeout=timeout,
+        )
+    else:  # the `token` parameter is deprecated in huggingface_hub>=0.10.0
+        return hf_api.dataset_info(repo_id, revision=revision, timeout=timeout, use_auth_token=use_auth_token)
+
+
+def list_repo_files(
+    hf_api: HfApi,
+    repo_id: str,
+    revision: Optional[str] = None,
+    repo_type: Optional[str] = None,
+    token: Optional[str] = None,
+    timeout: Optional[float] = None,
+) -> List[str]:
+    """
+    The huggingface_hub.HfApi.list_repo_files parameters changed in 0.10.0 and some of them were deprecated.
+    This function checks the huggingface_hub version to call the right parameters.
+    """
+    if version.parse(huggingface_hub.__version__) < version.parse("0.10.0"):
+        return hf_api.list_repo_files(repo_id, revision=revision, repo_type=repo_type, token=token, timeout=timeout)
+    else:  # the `token` parameter is deprecated in huggingface_hub>=0.10.0
+        return hf_api.list_repo_files(
+            repo_id, revision=revision, repo_type=repo_type, use_auth_token=token, timeout=timeout
+        )
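For context, a usage sketch of the two new compatibility helpers (not part of the commit; `squad` stands in for any dataset repository):

```python
from huggingface_hub import HfApi

from datasets import config
from datasets.utils._hf_hub_fixes import dataset_info, list_repo_files

api = HfApi(config.HF_ENDPOINT)

# With use_auth_token=None, huggingface_hub>=0.10 falls back to the token saved
# by `huggingface-cli login`; on older versions the helper reads
# HfFolder.get_token() itself and sends "no-token" when nothing is saved.
info = dataset_info(api, "squad", use_auth_token=None, timeout=100.0)
files = list_repo_files(api, "squad", repo_type="dataset")
print(info.id, len(files))
```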
16 changes: 10 additions & 6 deletions src/datasets/utils/file_utils.py
@@ -22,7 +22,9 @@
 from typing import List, Optional, Type, TypeVar, Union
 from urllib.parse import urljoin, urlparse

+import huggingface_hub
 import requests
+from huggingface_hub import HfFolder

 from .. import __version__, config
 from ..download.download_config import DownloadConfig
@@ -218,7 +220,9 @@ def cached_path(


 def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
-    ua = f"datasets/{__version__}; python/{config.PY_VERSION}"
+    ua = f"datasets/{__version__}"
+    ua += f"; python/{config.PY_VERSION}"
+    ua += f"; huggingface_hub/{huggingface_hub.__version__}"
     ua += f"; pyarrow/{config.PYARROW_VERSION}"
     if config.TORCH_AVAILABLE:
         ua += f"; torch/{config.TORCH_VERSION}"
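For reference, a quick way to inspect the resulting string (a sketch; the version numbers in the comment are illustrative):

```python
from datasets.utils.file_utils import get_datasets_user_agent

# Prints something like:
# datasets/2.5.2; python/3.8.10; huggingface_hub/0.10.0; pyarrow/9.0.0; torch/1.12.1
print(get_datasets_user_agent())
```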
@@ -239,13 +243,13 @@ def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union
     """Handle the HF authentication"""
     headers = {}
     if url.startswith(config.HF_ENDPOINT):
-        token = None
-        if isinstance(use_auth_token, str):
+        if use_auth_token is False:
+            token = None
+        elif isinstance(use_auth_token, str):
             token = use_auth_token
-        elif bool(use_auth_token):
-            from huggingface_hub import hf_api
+        else:
+            token = HfFolder.get_token()

-            token = hf_api.HfFolder.get_token()
         if token:
             headers["authorization"] = f"Bearer {token}"
     return headers
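The resulting behavior, sketched per `use_auth_token` value (the URL and token string here are hypothetical):

```python
from datasets.utils.file_utils import get_authentication_headers_for_url

url = "https://huggingface.co/datasets/me/private/resolve/main/data.txt"

get_authentication_headers_for_url(url, use_auth_token=False)     # {} (auth explicitly disabled)
get_authentication_headers_for_url(url, use_auth_token="hf_abc")  # {"authorization": "Bearer hf_abc"}
get_authentication_headers_for_url(url)                           # saved token via HfFolder.get_token(), if any
```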
3 changes: 2 additions & 1 deletion tests/test_filesystem.py
@@ -12,6 +12,7 @@
     extract_path_from_uri,
     is_remote_filesystem,
 )
+from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info

 from .utils import require_lz4, require_zstandard

@@ -93,7 +94,7 @@ def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):

 @pytest.mark.integration
 def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
-    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
+    repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
     hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
     assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
     assert hffs.isdir("data")
36 changes: 18 additions & 18 deletions tests/test_load.py
@@ -756,18 +756,6 @@ def test_load_dataset_streaming_csv(path_extension, streaming, csv_path, bz2_csv
     assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}


-@require_pil
-@pytest.mark.integration
-@pytest.mark.parametrize("streaming", [False, True])
-def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming):
-    ds = load_dataset(
-        hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, use_auth_token=hf_token
-    )
-    assert isinstance(ds, IterableDataset if streaming else Dataset)
-    ds_items = list(ds)
-    assert len(ds_items) == 2
-
-
 @pytest.mark.parametrize("streaming", [False, True])
 @pytest.mark.parametrize("data_file", ["zip_csv_path", "zip_csv_with_dir_path", "csv_path"])
 def test_load_dataset_zip_csv(data_file, streaming, zip_csv_path, zip_csv_with_dir_path, csv_path):
@@ -876,20 +864,32 @@ def assert_auth(url, *args, headers, **kwargs):

 @pytest.mark.integration
 def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data):
-    with pytest.raises(FileNotFoundError):
-        load_dataset(hf_private_dataset_repo_txt_data, streaming=True)
-    ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, use_auth_token=hf_token)
+    ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True)
     assert next(iter(ds)) is not None


 @pytest.mark.integration
 def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data):
-    with pytest.raises(FileNotFoundError):
-        load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True)
-    ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, use_auth_token=hf_token)
+    ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True)
     assert next(iter(ds)) is not None


+@require_pil
+@pytest.mark.integration
+@pytest.mark.parametrize("implicit_token", [False, True])
+@pytest.mark.parametrize("streaming", [False, True])
+def test_load_dataset_private_zipped_images(
+    hf_private_dataset_repo_zipped_img_data, hf_token, streaming, implicit_token
+):
+    use_auth_token = None if implicit_token else hf_token
+    ds = load_dataset(
+        hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, use_auth_token=use_auth_token
+    )
+    assert isinstance(ds, IterableDataset if streaming else Dataset)
+    ds_items = list(ds)
+    assert len(ds_items) == 2
+
+
 def test_load_dataset_then_move_then_reload(dataset_loading_script_dir, data_dir, tmp_path, caplog):
     cache_dir1 = tmp_path / "cache1"
     cache_dir2 = tmp_path / "cache2"
18 changes: 17 additions & 1 deletion tests/test_metric_common.py
@@ -38,6 +38,9 @@
 UNSUPPORTED_ON_WINDOWS = {"code_eval"}
 _on_windows = os.name == "nt"

+REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
+_has_transformers = importlib.util.find_spec("transformers") is not None
+

 def skip_if_metric_requires_fairseq(test_case):
     @wraps(test_case)
@@ -50,6 +53,17 @@ def wrapper(self, metric_name):
     return wrapper


+def skip_if_metric_requires_transformers(test_case):
+    @wraps(test_case)
+    def wrapper(self, metric_name):
+        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
+            self.skipTest('"test requires transformers"')
+        else:
+            test_case(self, metric_name)
+
+    return wrapper
+
+
 def skip_on_windows_if_not_windows_compatible(test_case):
     @wraps(test_case)
     def wrapper(self, metric_name):
@@ -67,7 +81,9 @@ def get_local_metric_names():


 @parameterized.named_parameters(get_local_metric_names())
-@for_all_test_methods(skip_if_metric_requires_fairseq, skip_on_windows_if_not_windows_compatible)
+@for_all_test_methods(
+    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
+)
 @local
 @pytest.mark.integration
 class LocalMetricTest(parameterized.TestCase):

1 comment on commit 365884d

@github-actions

PyArrow==6.0.0

Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.007981 / 0.011353 (-0.003372) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.004106 / 0.011008 (-0.006902) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.029837 / 0.038508 (-0.008672) |
| read_batch_unformated after write_array2d | 0.036109 / 0.023109 (0.012999) |
| read_batch_unformated after write_flattened_sequence | 0.302284 / 0.275898 (0.026386) |
| read_batch_unformated after write_nested_sequence | 0.367859 / 0.323480 (0.044379) |
| read_col_formatted_as_numpy after write_array2d | 0.006231 / 0.007986 (-0.001755) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.005112 / 0.004328 (0.000783) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.007154 / 0.004250 (0.002904) |
| read_col_unformated after write_array2d | 0.051436 / 0.037052 (0.014383) |
| read_col_unformated after write_flattened_sequence | 0.312269 / 0.258489 (0.053780) |
| read_col_unformated after write_nested_sequence | 0.347585 / 0.293841 (0.053744) |
| read_formatted_as_numpy after write_array2d | 0.031197 / 0.128546 (-0.097349) |
| read_formatted_as_numpy after write_flattened_sequence | 0.009877 / 0.075646 (-0.065769) |
| read_formatted_as_numpy after write_nested_sequence | 0.259004 / 0.419271 (-0.160268) |
| read_unformated after write_array2d | 0.051816 / 0.043533 (0.008283) |
| read_unformated after write_flattened_sequence | 0.301022 / 0.255139 (0.045883) |
| read_unformated after write_nested_sequence | 0.319854 / 0.283200 (0.036654) |
| write_array2d | 0.115418 / 0.141683 (-0.026265) |
| write_flattened_sequence | 1.465369 / 1.452155 (0.013215) |
| write_nested_sequence | 1.523005 / 1.492716 (0.030288) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.285275 / 0.018006 (0.267269) |
| get_batch_of_1024_rows | 0.542885 / 0.000490 (0.542395) |
| get_first_row | 0.001132 / 0.000200 (0.000932) |
| get_last_row | 0.000075 / 0.000054 (0.000020) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.022406 / 0.037411 (-0.015005) |
| shard | 0.100017 / 0.014526 (0.085491) |
| shuffle | 0.112969 / 0.176557 (-0.063588) |
| sort | 0.156502 / 0.737135 (-0.580633) |
| train_test_split | 0.116947 / 0.296338 (-0.179391) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.395358 / 0.215209 (0.180149) |
| read 50000 | 3.938462 / 2.077655 (1.860807) |
| read_batch 50000 10 | 1.776445 / 1.504120 (0.272325) |
| read_batch 50000 100 | 1.588889 / 1.541195 (0.047695) |
| read_batch 50000 1000 | 1.655473 / 1.468490 (0.186983) |
| read_formatted numpy 5000 | 0.426305 / 4.584777 (-4.158472) |
| read_formatted pandas 5000 | 3.748980 / 3.745712 (0.003268) |
| read_formatted tensorflow 5000 | 3.477836 / 5.269862 (-1.792025) |
| read_formatted torch 5000 | 1.820419 / 4.565676 (-2.745257) |
| read_formatted_batch numpy 5000 10 | 0.051596 / 0.424275 (-0.372679) |
| read_formatted_batch numpy 5000 1000 | 0.011089 / 0.007607 (0.003482) |
| shuffled read 5000 | 0.497939 / 0.226044 (0.271894) |
| shuffled read 50000 | 5.012581 / 2.268929 (2.743653) |
| shuffled read_batch 50000 10 | 2.231678 / 55.444624 (-53.212946) |
| shuffled read_batch 50000 100 | 1.888414 / 6.876477 (-4.988063) |
| shuffled read_batch 50000 1000 | 2.024293 / 2.142072 (-0.117780) |
| shuffled read_formatted numpy 5000 | 0.536898 / 4.805227 (-4.268329) |
| shuffled read_formatted_batch numpy 5000 10 | 0.118043 / 6.500664 (-6.382621) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.061344 / 0.075469 (-0.014125) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.478041 / 1.841788 (-0.363747) |
| map fast-tokenizer batched | 14.071862 / 8.074308 (5.997554) |
| map identity | 24.966068 / 10.191392 (14.774675) |
| map identity batched | 0.872603 / 0.680424 (0.192179) |
| map no-op batched | 0.563055 / 0.534201 (0.028854) |
| map no-op batched numpy | 0.388359 / 0.579283 (-0.190924) |
| map no-op batched pandas | 0.433684 / 0.434364 (-0.000680) |
| map no-op batched pytorch | 0.271288 / 0.540337 (-0.269049) |
| map no-op batched tensorflow | 0.269804 / 1.386936 (-1.117132) |
PyArrow==latest

Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.006244 / 0.011353 (-0.005109) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.003981 / 0.011008 (-0.007027) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.028706 / 0.038508 (-0.009802) |
| read_batch_unformated after write_array2d | 0.034061 / 0.023109 (0.010952) |
| read_batch_unformated after write_flattened_sequence | 0.375605 / 0.275898 (0.099707) |
| read_batch_unformated after write_nested_sequence | 0.442825 / 0.323480 (0.119345) |
| read_col_formatted_as_numpy after write_array2d | 0.004098 / 0.007986 (-0.003887) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.003505 / 0.004328 (-0.000823) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.005136 / 0.004250 (0.000885) |
| read_col_unformated after write_array2d | 0.041698 / 0.037052 (0.004645) |
| read_col_unformated after write_flattened_sequence | 0.383230 / 0.258489 (0.124741) |
| read_col_unformated after write_nested_sequence | 0.432965 / 0.293841 (0.139124) |
| read_formatted_as_numpy after write_array2d | 0.025484 / 0.128546 (-0.103062) |
| read_formatted_as_numpy after write_flattened_sequence | 0.007190 / 0.075646 (-0.068457) |
| read_formatted_as_numpy after write_nested_sequence | 0.262788 / 0.419271 (-0.156483) |
| read_unformated after write_array2d | 0.050203 / 0.043533 (0.006670) |
| read_unformated after write_flattened_sequence | 0.374703 / 0.255139 (0.119564) |
| read_unformated after write_nested_sequence | 0.394068 / 0.283200 (0.110869) |
| write_array2d | 0.103334 / 0.141683 (-0.038349) |
| write_flattened_sequence | 1.456962 / 1.452155 (0.004807) |
| write_nested_sequence | 1.517421 / 1.492716 (0.024705) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.333476 / 0.018006 (0.315469) |
| get_batch_of_1024_rows | 0.521987 / 0.000490 (0.521497) |
| get_first_row | 0.016128 / 0.000200 (0.015928) |
| get_last_row | 0.000308 / 0.000054 (0.000253) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.025123 / 0.037411 (-0.012289) |
| shard | 0.102682 / 0.014526 (0.088156) |
| shuffle | 0.115085 / 0.176557 (-0.061471) |
| sort | 0.158183 / 0.737135 (-0.578953) |
| train_test_split | 0.118526 / 0.296338 (-0.177812) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.447635 / 0.215209 (0.232426) |
| read 50000 | 4.464295 / 2.077655 (2.386641) |
| read_batch 50000 10 | 2.242636 / 1.504120 (0.738517) |
| read_batch 50000 100 | 2.057766 / 1.541195 (0.516571) |
| read_batch 50000 1000 | 2.122844 / 1.468490 (0.654354) |
| read_formatted numpy 5000 | 0.428432 / 4.584777 (-4.156344) |
| read_formatted pandas 5000 | 3.757355 / 3.745712 (0.011643) |
| read_formatted tensorflow 5000 | 2.063995 / 5.269862 (-3.205866) |
| read_formatted torch 5000 | 1.232325 / 4.565676 (-3.333351) |
| read_formatted_batch numpy 5000 10 | 0.051563 / 0.424275 (-0.372712) |
| read_formatted_batch numpy 5000 1000 | 0.010932 / 0.007607 (0.003325) |
| shuffled read 5000 | 0.549804 / 0.226044 (0.323759) |
| shuffled read 50000 | 5.449171 / 2.268929 (3.180243) |
| shuffled read_batch 50000 10 | 2.704557 / 55.444624 (-52.740068) |
| shuffled read_batch 50000 100 | 2.387378 / 6.876477 (-4.489099) |
| shuffled read_batch 50000 1000 | 2.517433 / 2.142072 (0.375360) |
| shuffled read_formatted numpy 5000 | 0.536910 / 4.805227 (-4.268317) |
| shuffled read_formatted_batch numpy 5000 10 | 0.120651 / 6.500664 (-6.380013) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.061997 / 0.075469 (-0.013472) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.519110 / 1.841788 (-0.322678) |
| map fast-tokenizer batched | 14.077498 / 8.074308 (6.003190) |
| map identity | 13.266279 / 10.191392 (3.074887) |
| map identity batched | 0.925450 / 0.680424 (0.245027) |
| map no-op batched | 0.600793 / 0.534201 (0.066592) |
| map no-op batched numpy | 0.368101 / 0.579283 (-0.211182) |
| map no-op batched pandas | 0.428469 / 0.434364 (-0.005895) |
| map no-op batched pytorch | 0.244848 / 0.540337 (-0.295490) |
| map no-op batched tensorflow | 0.262307 / 1.386936 (-1.124629) |
