No module named 'datasets.tasks'
ModuleNotFoundError Traceback (most recent call last)
Cell In[2], line 3
1 from datasets import load_dataset
----> 3 dataset = load_dataset('knowledgator/events_classification_biotech')
5 classes = [class_ for class_ in dataset['train'].features['label 1'].names if class_]
6 class2id = {class_:id for id, class_ in enumerate(classes)}
File /opt/anaconda3/lib/python3.12/site-packages/datasets/load.py:2132, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, keep_in_memory, save_infos, revision, token, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
2127 verification_mode = VerificationMode(
2128 (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
2129 )
2131 # Create a dataset builder
-> 2132 builder_instance = load_dataset_builder(
2133 path=path,
2134 name=name,
2135 data_dir=data_dir,
2136 data_files=data_files,
2137 cache_dir=cache_dir,
2138 features=features,
2139 download_config=download_config,
2140 download_mode=download_mode,
2141 revision=revision,
2142 token=token,
2143 storage_options=storage_options,
2144 trust_remote_code=trust_remote_code,
2145 _require_default_config_name=name is None,
2146 **config_kwargs,
2147 )
2149 # Return iterable dataset in case of streaming
2150 if streaming:
File /opt/anaconda3/lib/python3.12/site-packages/datasets/load.py:1888, in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, token, storage_options, trust_remote_code, _require_default_config_name, **config_kwargs)
1885 error_msg += f'\nFor example data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}'
1886 raise ValueError(error_msg)
-> 1888 builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
1889 # Instantiate the dataset builder
1890 builder_instance: DatasetBuilder = builder_cls(
1891 cache_dir=cache_dir,
1892 dataset_name=dataset_name,
(...)
1902 **config_kwargs,
1903 )
File /opt/anaconda3/lib/python3.12/site-packages/datasets/load.py:248, in get_dataset_builder_class(dataset_module, dataset_name)
242 def get_dataset_builder_class(
243 dataset_module: "DatasetModule", dataset_name: Optional[str] = None
244 ) -> Type[DatasetBuilder]:
245 with lock_importable_file(
246 dataset_module.importable_file_path
247 ) if dataset_module.importable_file_path else nullcontext():
--> 248 builder_cls = import_main_class(dataset_module.module_path)
249 if dataset_module.builder_configs_parameters.builder_configs:
250 dataset_name = dataset_name or dataset_module.builder_kwargs.get("dataset_name")
File /opt/anaconda3/lib/python3.12/site-packages/datasets/load.py:169, in import_main_class(module_path)
167 def import_main_class(module_path) -> Optional[Type[DatasetBuilder]]:
168 """Import a module at module_path and return its main class: a DatasetBuilder"""
--> 169 module = importlib.import_module(module_path)
170 # Find the main class in our imported module
171 module_main_cls = None
File /opt/anaconda3/lib/python3.12/importlib/__init__.py:90, in import_module(name, package)
88 break
89 level += 1
---> 90 return _bootstrap._gcd_import(name[level:], package, level)
File <frozen importlib._bootstrap>:1387, in _gcd_import(name, package, level)
File <frozen importlib._bootstrap>:1360, in _find_and_load(name, import_)
File <frozen importlib._bootstrap>:1331, in _find_and_load_unlocked(name, import_)
File <frozen importlib._bootstrap>:935, in _load_unlocked(spec)
File <frozen importlib._bootstrap_external>:995, in exec_module(self, module)
File <frozen importlib._bootstrap>:488, in _call_with_frames_removed(f, *args, **kwds)
File ~/.cache/huggingface/modules/datasets_modules/datasets/knowledgator--events_classification_biotech/9c8086d498c3104de3a3c5b6640837e18ccd829dcaca49f1cdffe3eb5c4a6361/events_classification_biotech.py:3
1 import datasets
2 from datasets import load_dataset
----> 3 from datasets.tasks import TextClassification
5 DESCRIPTION = """
6 Text classification is a widespread task and a foundational step in numerous information extraction pipelines. However, a notable challenge in current NLP research lies in the oversimplification of benchmarking datasets, which predominantly focus on rudimentary tasks such as topic classification or sentiment analysis.
7
(...)
10 A distinctive feature of this dataset is its emphasis on not only identifying the overarching theme but also extracting information about the target companies associated with the news. This dual-layered approach enhances the dataset's utility for applications that require a deeper understanding of the relationships between events, companies, and the biotech industry as a whole.
11 """
13 labels = ['event organization',
14 'executive statement',
15 'regulatory approval',
(...)
41 'funding round',
42 'patent publication']
ModuleNotFoundError: No module named 'datasets.tasks'
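
The failing import is not in the notebook cell itself: the dataset repository ships a loading script (events_classification_biotech.py) that runs "from datasets.tasks import TextClassification", and the datasets.tasks module is no longer present in the installed datasets library, so the script raises ModuleNotFoundError as soon as it is executed. Below is a minimal workaround sketch, assuming an older 2.x release of datasets still provides datasets.tasks (the exact cut-off version is an assumption, not verified here):

# Workaround sketch: pin an older `datasets` release that still ships the
# deprecated `datasets.tasks` module imported by the dataset's loading script.
# Assumption: a 2.x release (e.g. datasets==2.21.0) still provides it, while
# newer releases have removed it.
#
#     pip install "datasets<3.0.0"
#
from datasets import load_dataset

dataset = load_dataset(
    'knowledgator/events_classification_biotech',
    trust_remote_code=True,  # the dataset is defined by a loading script
)

# Same post-processing as in the failing cell.
classes = [class_ for class_ in dataset['train'].features['label 1'].names if class_]
class2id = {class_: id for id, class_ in enumerate(classes)}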