Revert task removal in folder-based builders (#5051)
* Add AudioClassification task

* Add classification task to folder based builders

* Fix tests

* Minor fix

* Minor fix again
mariosasko committed Oct 3, 2022
1 parent a8893e6 commit 583bb7d
Showing 8 changed files with 79 additions and 2 deletions.
2 changes: 2 additions & 0 deletions docs/source/package_reference/task_templates.mdx
@@ -4,6 +4,8 @@ The tasks supported by [`Dataset.prepare_for_task`] and [`DatasetDict.prepare_fo

[[autodoc]] datasets.tasks.AutomaticSpeechRecognition

[[autodoc]] datasets.tasks.AudioClassification

[[autodoc]] datasets.tasks.ImageClassification
- align_with_features

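For reference, a minimal sketch (not part of this diff) of how a task template documented on this page is consumed by `Dataset.prepare_for_task`. It uses the text-classification template for brevity and invented column names ("review", "sentiment"); the audio-classification template added above is used the same way.

```python
from datasets import ClassLabel, Dataset, DatasetInfo, Features, Value
from datasets.tasks import TextClassification

# A toy dataset whose DatasetInfo carries a task template (hypothetical columns).
features = Features({"review": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
info = DatasetInfo(
    features=features,
    task_templates=[TextClassification(text_column="review", label_column="sentiment")],
)
ds = Dataset.from_dict({"review": ["great", "terrible"], "sentiment": [1, 0]}, info=info)

# prepare_for_task drops extra columns, renames the remaining ones to the
# template schema ("text"/"labels"), and casts the features accordingly.
prepared = ds.prepare_for_task("text-classification")
print(prepared.column_names)  # ['text', 'labels']
```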
2 changes: 2 additions & 0 deletions src/datasets/packaged_modules/audiofolder/audiofolder.py
@@ -1,6 +1,7 @@
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder

@@ -20,6 +21,7 @@ class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


# Obtained with:
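A hedged, end-to-end sketch of what the restored attribute gives you: with a class-per-directory layout, AudioFolder infers a `label` column and attaches the `audio-classification` template. The temporary layout and label names below are invented, and the silent WAV files are written only to keep the example self-contained; this is illustrative, not part of the commit.

```python
import os
import struct
import tempfile
import wave

from datasets import load_dataset

# Build a tiny "audiofolder" layout: one subdirectory per class.
data_dir = tempfile.mkdtemp()
for label in ("cat", "dog"):
    os.makedirs(os.path.join(data_dir, label))
    with wave.open(os.path.join(data_dir, label, f"{label}_0.wav"), "wb") as f:
        f.setnchannels(1)
        f.setsampwidth(2)
        f.setframerate(16_000)
        f.writeframes(struct.pack("<h", 0) * 1_600)  # 0.1 s of silence

ds = load_dataset("audiofolder", data_dir=data_dir, split="train")
print(ds.features["label"].names)      # ['cat', 'dog']
print(ds.info.task_templates[0].task)  # 'audio-classification'
```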
9 changes: 7 additions & 2 deletions src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
@@ -2,14 +2,16 @@
import itertools
import os
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple
from typing import List, Optional, Tuple

import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.json as paj

import datasets
from datasets.features.features import FeatureType
from datasets.tasks.base import TaskTemplate


logger = datasets.utils.logging.get_logger(__name__)
@@ -62,12 +64,14 @@ class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
will be included in a dataset)
CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure
"""

BASE_FEATURE: Any
BASE_FEATURE: FeatureType
BASE_COLUMN_NAME: str
BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
EXTENSIONS: List[str]
CLASSIFICATION_TASK: TaskTemplate

SKIP_CHECKSUM_COMPUTATION_BY_DEFAULT: bool = True
METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
@@ -214,6 +218,7 @@ def analyze(files_or_archives, downloaded_files_or_dirs, split):
"label": datasets.ClassLabel(names=sorted(labels)),
}
)
self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)]
else:
self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE})

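For context, a minimal sketch (not part of this commit) of the class-attribute contract described in the docstring above, including the restored `CLASSIFICATION_TASK`. The `TextFolder` builder and its `.txt` handling are hypothetical; they mirror the dummy builder used in the tests further down.

```python
import datasets
from datasets.packaged_modules.folder_based_builder.folder_based_builder import (
    FolderBasedBuilder,
    FolderBasedBuilderConfig,
)
from datasets.tasks import TextClassification


class TextFolder(FolderBasedBuilder):
    """Hypothetical folder-based builder, shown only to illustrate the attributes."""

    BASE_FEATURE = datasets.Value("string")          # feature used for the data column
    BASE_COLUMN_NAME = "text"                        # name of the data column
    BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
    EXTENSIONS = [".txt"]                            # files picked up by the builder
    # Attached to DatasetInfo when labels are inferred from the folder structure.
    CLASSIFICATION_TASK = TextClassification(text_column="text", label_column="label")
```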
2 changes: 2 additions & 0 deletions src/datasets/packaged_modules/imagefolder/imagefolder.py
@@ -1,6 +1,7 @@
from typing import List

import datasets
from datasets.tasks import ImageClassification

from ..folder_based_builder import folder_based_builder

@@ -20,6 +21,7 @@ class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")


# Obtained with:
3 changes: 3 additions & 0 deletions src/datasets/tasks/__init__.py
@@ -1,6 +1,7 @@
from typing import Optional

from ..utils.logging import get_logger
from .audio_classificiation import AudioClassification
from .automatic_speech_recognition import AutomaticSpeechRecognition
from .base import TaskTemplate
from .image_classification import ImageClassification
@@ -12,6 +13,7 @@

__all__ = [
"AutomaticSpeechRecognition",
"AudioClassification",
"ImageClassification",
"LanguageModeling",
"QuestionAnsweringExtractive",
@@ -25,6 +27,7 @@

NAME2TEMPLATE = {
AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
AudioClassification.task: AudioClassification,
ImageClassification.task: ImageClassification,
LanguageModeling.task: LanguageModeling,
QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
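A quick sketch (not part of this diff) of how the registry entry added above can be used together with `from_dict`, which the new tests below also exercise. The column names "clip" and "category" are invented.

```python
from datasets.tasks import NAME2TEMPLATE, AudioClassification

# Look up the template class by its task name, then build it from a dict.
template_cls = NAME2TEMPLATE["audio-classification"]
assert template_cls is AudioClassification

template = template_cls.from_dict({"audio_column": "clip", "label_column": "category"})
print(template.task)            # 'audio-classification'
print(template.column_mapping)  # {'clip': 'audio', 'category': 'labels'}
```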
33 changes: 33 additions & 0 deletions src/datasets/tasks/audio_classificiation.py
@@ -0,0 +1,33 @@
import copy
from dataclasses import dataclass
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
task: str = "audio-classification"
input_schema: ClassVar[Features] = Features({"audio": Audio()})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
audio_column: str = "audio"
label_column: str = "labels"

def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template

@property
def column_mapping(self) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
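A short usage sketch for the template defined above (not part of the commit; the column and label names are made up). `align_with_features` is what the folder-based builders call to copy the dataset's concrete `ClassLabel` into the template's label schema.

```python
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

task = AudioClassification(audio_column="input_audio", label_column="input_label")
print(task.column_mapping)  # {'input_audio': 'audio', 'input_label': 'labels'}

# The placeholder ClassLabel in label_schema is replaced by the dataset's own
# ClassLabel, so the label names survive a later `prepare_for_task` cast.
features = Features({"input_audio": Audio(), "input_label": ClassLabel(names=["dog", "cat"])})
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['dog', 'cat']
```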
2 changes: 2 additions & 0 deletions tests/packaged_modules/test_folder_based_builder.py
@@ -11,13 +11,15 @@
FolderBasedBuilder,
FolderBasedBuilderConfig,
)
from datasets.tasks import TextClassification


class DummyFolderBasedBuilder(FolderBasedBuilder):
BASE_FEATURE = None
BASE_COLUMN_NAME = "base"
BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
EXTENSIONS = [".txt"]
CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label")


@pytest.fixture
28 changes: 28 additions & 0 deletions tests/test_tasks.py
@@ -5,6 +5,7 @@
from datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value
from datasets.info import DatasetInfo
from datasets.tasks import (
AudioClassification,
AutomaticSpeechRecognition,
ImageClassification,
LanguageModeling,
@@ -126,6 +127,33 @@ def test_from_dict(self):
self.assertEqual(label_schema, task.label_schema)


class AudioClassificationTest(TestCase):
def setUp(self):
self.labels = sorted(["pos", "neg"])

def test_column_mapping(self):
task = AudioClassification(audio_column="input_audio", label_column="input_label")
self.assertDictEqual({"input_audio": "audio", "input_label": "labels"}, task.column_mapping)

def test_from_dict(self):
input_schema = Features({"audio": Audio()})
label_schema = Features({"labels": ClassLabel})
template_dict = {
"audio_column": "input_image",
"label_column": "input_label",
}
task = AudioClassification.from_dict(template_dict)
self.assertEqual("audio-classification", task.task)
self.assertEqual(input_schema, task.input_schema)
self.assertEqual(label_schema, task.label_schema)

def test_align_with_features(self):
task = AudioClassification(audio_column="input_audio", label_column="input_label")
self.assertEqual(task.label_schema["labels"], ClassLabel)
task = task.align_with_features(Features({"input_label": ClassLabel(names=self.labels)}))
self.assertEqual(task.label_schema["labels"], ClassLabel(names=self.labels))


class ImageClassificationTest(TestCase):
def setUp(self):
self.labels = sorted(["pos", "neg"])

1 comment on commit 583bb7d

@github-actions

PyArrow==6.0.0


Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.007389 / 0.011353 (-0.003964) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.003539 / 0.011008 (-0.007469) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.028543 / 0.038508 (-0.009966) |
| read_batch_unformated after write_array2d | 0.029249 / 0.023109 (0.006139) |
| read_batch_unformated after write_flattened_sequence | 0.299829 / 0.275898 (0.023931) |
| read_batch_unformated after write_nested_sequence | 0.355426 / 0.323480 (0.031946) |
| read_col_formatted_as_numpy after write_array2d | 0.005475 / 0.007986 (-0.002511) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.002889 / 0.004328 (-0.001440) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.006616 / 0.004250 (0.002365) |
| read_col_unformated after write_array2d | 0.035083 / 0.037052 (-0.001969) |
| read_col_unformated after write_flattened_sequence | 0.317896 / 0.258489 (0.059407) |
| read_col_unformated after write_nested_sequence | 0.346660 / 0.293841 (0.052819) |
| read_formatted_as_numpy after write_array2d | 0.028492 / 0.128546 (-0.100054) |
| read_formatted_as_numpy after write_flattened_sequence | 0.009124 / 0.075646 (-0.066522) |
| read_formatted_as_numpy after write_nested_sequence | 0.247772 / 0.419271 (-0.171500) |
| read_unformated after write_array2d | 0.044673 / 0.043533 (0.001140) |
| read_unformated after write_flattened_sequence | 0.310499 / 0.255139 (0.055360) |
| read_unformated after write_nested_sequence | 0.332844 / 0.283200 (0.049644) |
| write_array2d | 0.086053 / 0.141683 (-0.055629) |
| write_flattened_sequence | 1.500813 / 1.452155 (0.048658) |
| write_nested_sequence | 1.527323 / 1.492716 (0.034607) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.197498 / 0.018006 (0.179492) |
| get_batch_of_1024_rows | 0.421684 / 0.000490 (0.421194) |
| get_first_row | 0.001538 / 0.000200 (0.001338) |
| get_last_row | 0.000096 / 0.000054 (0.000041) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.021185 / 0.037411 (-0.016226) |
| shard | 0.093097 / 0.014526 (0.078571) |
| shuffle | 0.102221 / 0.176557 (-0.074336) |
| sort | 0.141885 / 0.737135 (-0.595250) |
| train_test_split | 0.107023 / 0.296338 (-0.189315) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.412324 / 0.215209 (0.197115) |
| read 50000 | 4.117832 / 2.077655 (2.040178) |
| read_batch 50000 10 | 1.844963 / 1.504120 (0.340843) |
| read_batch 50000 100 | 1.642969 / 1.541195 (0.101774) |
| read_batch 50000 1000 | 1.663773 / 1.468490 (0.195283) |
| read_formatted numpy 5000 | 0.444021 / 4.584777 (-4.140756) |
| read_formatted pandas 5000 | 3.382696 / 3.745712 (-0.363016) |
| read_formatted tensorflow 5000 | 1.836548 / 5.269862 (-3.433313) |
| read_formatted torch 5000 | 1.235262 / 4.565676 (-3.330414) |
| read_formatted_batch numpy 5000 10 | 0.052794 / 0.424275 (-0.371481) |
| read_formatted_batch numpy 5000 1000 | 0.010642 / 0.007607 (0.003035) |
| shuffled read 5000 | 0.519627 / 0.226044 (0.293583) |
| shuffled read 50000 | 5.173118 / 2.268929 (2.904189) |
| shuffled read_batch 50000 10 | 2.267780 / 55.444624 (-53.176845) |
| shuffled read_batch 50000 100 | 1.925724 / 6.876477 (-4.950753) |
| shuffled read_batch 50000 1000 | 1.993103 / 2.142072 (-0.148969) |
| shuffled read_formatted numpy 5000 | 0.559586 / 4.805227 (-4.245641) |
| shuffled read_formatted_batch numpy 5000 10 | 0.117432 / 6.500664 (-6.383232) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.062397 / 0.075469 (-0.013072) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.520915 / 1.841788 (-0.320873) |
| map fast-tokenizer batched | 12.273558 / 8.074308 (4.199250) |
| map identity | 25.994895 / 10.191392 (15.803503) |
| map identity batched | 0.864385 / 0.680424 (0.183962) |
| map no-op batched | 0.597116 / 0.534201 (0.062915) |
| map no-op batched numpy | 0.346851 / 0.579283 (-0.232432) |
| map no-op batched pandas | 0.412917 / 0.434364 (-0.021447) |
| map no-op batched pytorch | 0.238993 / 0.540337 (-0.301344) |
| map no-op batched tensorflow | 0.245234 / 1.386936 (-1.141702) |
PyArrow==latest

Benchmark: benchmark_array_xd.json

| metric | new / old (diff) |
|---|---|
| read_batch_formatted_as_numpy after write_array2d | 0.005333 / 0.011353 (-0.006020) |
| read_batch_formatted_as_numpy after write_flattened_sequence | 0.003619 / 0.011008 (-0.007389) |
| read_batch_formatted_as_numpy after write_nested_sequence | 0.026543 / 0.038508 (-0.011965) |
| read_batch_unformated after write_array2d | 0.027511 / 0.023109 (0.004402) |
| read_batch_unformated after write_flattened_sequence | 0.386050 / 0.275898 (0.110152) |
| read_batch_unformated after write_nested_sequence | 0.439711 / 0.323480 (0.116231) |
| read_col_formatted_as_numpy after write_array2d | 0.003269 / 0.007986 (-0.004717) |
| read_col_formatted_as_numpy after write_flattened_sequence | 0.002901 / 0.004328 (-0.001427) |
| read_col_formatted_as_numpy after write_nested_sequence | 0.004529 / 0.004250 (0.000278) |
| read_col_unformated after write_array2d | 0.031995 / 0.037052 (-0.005057) |
| read_col_unformated after write_flattened_sequence | 0.376139 / 0.258489 (0.117649) |
| read_col_unformated after write_nested_sequence | 0.437255 / 0.293841 (0.143414) |
| read_formatted_as_numpy after write_array2d | 0.023914 / 0.128546 (-0.104632) |
| read_formatted_as_numpy after write_flattened_sequence | 0.006539 / 0.075646 (-0.069107) |
| read_formatted_as_numpy after write_nested_sequence | 0.243424 / 0.419271 (-0.175848) |
| read_unformated after write_array2d | 0.042177 / 0.043533 (-0.001356) |
| read_unformated after write_flattened_sequence | 0.407467 / 0.255139 (0.152328) |
| read_unformated after write_nested_sequence | 0.432688 / 0.283200 (0.149489) |
| write_array2d | 0.084102 / 0.141683 (-0.057581) |
| write_flattened_sequence | 1.510014 / 1.452155 (0.057859) |
| write_nested_sequence | 1.573489 / 1.492716 (0.080773) |

Benchmark: benchmark_getitem_100B.json

| metric | new / old (diff) |
|---|---|
| get_batch_of_1024_random_rows | 0.218256 / 0.018006 (0.200250) |
| get_batch_of_1024_rows | 0.416127 / 0.000490 (0.415638) |
| get_first_row | 0.000982 / 0.000200 (0.000783) |
| get_last_row | 0.000075 / 0.000054 (0.000020) |

Benchmark: benchmark_indices_mapping.json

| metric | new / old (diff) |
|---|---|
| select | 0.020338 / 0.037411 (-0.017074) |
| shard | 0.092068 / 0.014526 (0.077542) |
| shuffle | 0.103186 / 0.176557 (-0.073371) |
| sort | 0.144894 / 0.737135 (-0.592242) |
| train_test_split | 0.103814 / 0.296338 (-0.192525) |

Benchmark: benchmark_iterating.json

| metric | new / old (diff) |
|---|---|
| read 5000 | 0.449504 / 0.215209 (0.234295) |
| read 50000 | 4.501334 / 2.077655 (2.423679) |
| read_batch 50000 10 | 2.180486 / 1.504120 (0.676366) |
| read_batch 50000 100 | 1.991764 / 1.541195 (0.450570) |
| read_batch 50000 1000 | 2.014903 / 1.468490 (0.546413) |
| read_formatted numpy 5000 | 0.443357 / 4.584777 (-4.141420) |
| read_formatted pandas 5000 | 3.354224 / 3.745712 (-0.391488) |
| read_formatted tensorflow 5000 | 1.791716 / 5.269862 (-3.478146) |
| read_formatted torch 5000 | 1.087549 / 4.565676 (-3.478128) |
| read_formatted_batch numpy 5000 10 | 0.052565 / 0.424275 (-0.371710) |
| read_formatted_batch numpy 5000 1000 | 0.010853 / 0.007607 (0.003246) |
| shuffled read 5000 | 0.548253 / 0.226044 (0.322209) |
| shuffled read 50000 | 5.508585 / 2.268929 (3.239657) |
| shuffled read_batch 50000 10 | 2.609058 / 55.444624 (-52.835566) |
| shuffled read_batch 50000 100 | 2.259440 / 6.876477 (-4.617037) |
| shuffled read_batch 50000 1000 | 2.341444 / 2.142072 (0.199371) |
| shuffled read_formatted numpy 5000 | 0.558038 / 4.805227 (-4.247190) |
| shuffled read_formatted_batch numpy 5000 10 | 0.118155 / 6.500664 (-6.382509) |
| shuffled read_formatted_batch numpy 5000 1000 | 0.062490 / 0.075469 (-0.012979) |

Benchmark: benchmark_map_filter.json

| metric | new / old (diff) |
|---|---|
| filter | 1.546971 / 1.841788 (-0.294817) |
| map fast-tokenizer batched | 12.269521 / 8.074308 (4.195213) |
| map identity | 11.938951 / 10.191392 (1.747559) |
| map identity batched | 0.903862 / 0.680424 (0.223438) |
| map no-op batched | 0.625909 / 0.534201 (0.091708) |
| map no-op batched numpy | 0.332159 / 0.579283 (-0.247124) |
| map no-op batched pandas | 0.386669 / 0.434364 (-0.047694) |
| map no-op batched pytorch | 0.217306 / 0.540337 (-0.323031) |
| map no-op batched tensorflow | 0.229992 / 1.386936 (-1.156944) |
