"""Circular linked list: a singly linked list whose tail points back to the head."""
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True

    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
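# Illustrative only (not part of the original module): the circular structure makes
# round-robin traversal natural, since the tail's `next` wraps back to the head.
# The helper below is a hypothetical usage sketch of the class above.
if __name__ == "__main__":

    def round_robin_take(items: list, n: int) -> list:
        """Cycle through `items` repeatedly, collecting `n` values."""
        cll = CircularLinkedList()
        for item in items:
            cll.insert_tail(item)
        out: list = []
        node = cll.head
        while len(out) < n and node is not None:
            out.append(node.data)
            node = node.next  # wraps to the head after the tail
        return out

    assert round_robin_take(["a", "b", "c"], 7) == ["a", "b", "c", "a", "b", "c", "a"]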
"""Spark-backed dataset builder: create a 🤗 Dataset from a PySpark DataFrame."""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which would result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will
            # not change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to"
            " access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result
            # in a pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
def lowercase_ ( self : Tuple , lowercase__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
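# Illustrative only: this builder backs `datasets.Dataset.from_spark` (available in
# datasets>=2.11). The toy DataFrame below is hypothetical.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    toy_df = spark.createDataFrame([(1, "a"), (2, "b")], schema="id: long, text: string")
    ds = datasets.Dataset.from_spark(toy_df)  # materializes the DataFrame as an Arrow-backed Dataset
    print(ds[0])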
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
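# Illustrative only: a condensed, standalone version of the integration test above.
# It assumes torchaudio is installed; the local FLAC path here is hypothetical.
if __name__ == "__main__":
    import torch
    import torchaudio
    from transformers import ASTFeatureExtractor, ASTForAudioClassification

    extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")

    waveform, sampling_rate = torchaudio.load("sample_audio.flac")  # hypothetical file
    inputs = extractor(waveform.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 527): AudioSet classes
    print(model.config.id2label[int(logits.argmax(-1))])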
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Dict: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def snake_case__ ( ) ->Optional[Any]:
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
UpperCAmelCase__ = [1, 2, 3]
with pytest.raises(_SCREAMING_SNAKE_CASE ):
with parallel_backend("""unsupported backend""" ):
map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=2 )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
with parallel_backend("""unsupported backend""" ):
map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Tuple:
UpperCAmelCase__ = [1, 2]
UpperCAmelCase__ = {"""a""": 1, """b""": 2}
UpperCAmelCase__ = {"""a""": [1, 2], """b""": [3, 4]}
UpperCAmelCase__ = {"""a""": {"""1""": 1}, """b""": 2}
UpperCAmelCase__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
UpperCAmelCase__ = [2, 3]
UpperCAmelCase__ = {"""a""": 2, """b""": 3}
UpperCAmelCase__ = {"""a""": [2, 3], """b""": [4, 5]}
UpperCAmelCase__ = {"""a""": {"""1""": 2}, """b""": 3}
UpperCAmelCase__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
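# Illustrative only: the user-facing pattern the tests above exercise.
# `parallel_backend("spark")` dispatches `map_nested` work through joblib-spark,
# so running this sketch requires joblibspark and a Spark installation.
if __name__ == "__main__":

    def square(x):
        return x * x

    with parallel_backend("spark"):
        print(map_nested(square, {"a": [1, 2], "b": [3, 4]}, num_proc=2))  # {'a': [1, 4], 'b': [9, 16]}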
"""Feature extractor class for VideoMAE."""
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
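# Illustrative migration note: the alias above keeps old imports working, but new code
# should construct the image processor directly ("MCG-NJU/videomae-base" is one public
# checkpoint, used here only as an example):
#
#     from transformers import VideoMAEImageProcessor
#     processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")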
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len), where for each alternative k:
    input_ids[i, k, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont_k[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
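# Illustrative only: how one (story, cont1) pair is packed by the function above.
# The token ids below are made up for the sketch.
#
#     story, cont1 = [10, 11], [20]
#     start, delim, clf = 40478, 40479, 40480       # hypothetical special-token ids
#     with_cont1 = [start] + story + [delim] + cont1 + [clf]  # -> [40478, 10, 11, 40479, 20, 40480]
#     mc_token_id = len(with_cont1) - 1             # index of the [clf] token used by the MC head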
def main():
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_A , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_A , type=_A , required=_A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_A , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_A , default='''''' )
parser.add_argument('''--seed''' , type=_A , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_A , default=3 )
parser.add_argument('''--train_batch_size''' , type=_A , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_A , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_A , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_A , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_A , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_A , default=6.2_5E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_A , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_A , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_A , default=0.0_1 )
parser.add_argument('''--lm_coef''' , type=_A , default=0.9 )
parser.add_argument('''--n_valid''' , type=_A , default=374 )
parser.add_argument('''--server_ip''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_A )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowerCAmelCase_ = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_A , _A ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase_ = ['''_start_''', '''_delimiter_''', '''_classify_''']
lowerCAmelCase_ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_A )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_A )
lowerCAmelCase_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_A ) )
model.to(_A )
# Load and encode the datasets
def tokenize_and_encode(_A ):
if isinstance(_A , _A ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_A ) )
elif isinstance(_A , _A ):
return obj
return [tokenize_and_encode(_A ) for o in obj]
logger.info('''Encoding dataset...''' )
lowerCAmelCase_ = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase_ = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase_ = (train_dataset, eval_dataset)
lowerCAmelCase_ = tokenize_and_encode(_A )
# Compute the max input length for the Transformer
lowerCAmelCase_ = model.config.n_positions // 2 - 2
lowerCAmelCase_ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase_ = min(_A , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase_ = pre_process_datasets(_A , _A , _A , *_A )
lowerCAmelCase_ , lowerCAmelCase_ = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase_ = TensorDataset(*_A )
lowerCAmelCase_ = RandomSampler(_A )
lowerCAmelCase_ = DataLoader(_A , sampler=_A , batch_size=args.train_batch_size )
lowerCAmelCase_ = TensorDataset(*_A )
lowerCAmelCase_ = SequentialSampler(_A )
lowerCAmelCase_ = DataLoader(_A , sampler=_A , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase_ = args.max_steps
lowerCAmelCase_ = args.max_steps // (len(_A ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase_ = len(_A ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase_ = list(model.named_parameters() )
lowerCAmelCase_ = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
lowerCAmelCase_ = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
lowerCAmelCase_ = AdamW(_A , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase_ = get_linear_schedule_with_warmup(
_A , num_warmup_steps=args.warmup_steps , num_training_steps=_A )
if args.do_train:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
lowerCAmelCase_ = tqdm(_A , desc='''Training''' )
for step, batch in enumerate(_A ):
lowerCAmelCase_ = tuple(t.to(_A ) for t in batch )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = batch
lowerCAmelCase_ = model(_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
lowerCAmelCase_ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase_ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase_ = '''Training loss: {:.2e} lr: {:.2e}'''.format(_A , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase_ = model.module if hasattr(_A , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase_ = os.path.join(args.output_dir , _A )
lowerCAmelCase_ = os.path.join(args.output_dir , _A )
torch.save(model_to_save.state_dict() , _A )
model_to_save.config.to_json_file(_A )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase_ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_A )
if args.do_eval:
model.eval()
lowerCAmelCase_ , lowerCAmelCase_ = 0, 0
lowerCAmelCase_ , lowerCAmelCase_ = 0, 0
for batch in tqdm(_A , desc='''Evaluating''' ):
lowerCAmelCase_ = tuple(t.to(_A ) for t in batch )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = batch
with torch.no_grad():
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = model(
_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
lowerCAmelCase_ = mc_logits.detach().cpu().numpy()
lowerCAmelCase_ = mc_labels.to('''cpu''' ).numpy()
lowerCAmelCase_ = accuracy(_A , _A )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase_ = eval_loss / nb_eval_steps
lowerCAmelCase_ = eval_accuracy / nb_eval_examples
lowerCAmelCase_ = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase_ = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
lowerCAmelCase_ = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
"""Feature extractor class for Deformable DETR."""
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
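# Illustrative only: a toy sketch of the "copy alternating layers" idea the helper above
# tests. This is NOT the real implementation in make_student.py; names are hypothetical.
if __name__ == "__main__":
    import torch.nn as nn

    def pick_alternating(teacher_layers: nn.ModuleList, n_student: int) -> nn.ModuleList:
        """Keep every k-th teacher layer so the student spans the teacher's full depth."""
        step = max(1, len(teacher_layers) // n_student)
        kept = [teacher_layers[i] for i in range(0, len(teacher_layers), step)][:n_student]
        return nn.ModuleList(kept)

    teacher = nn.ModuleList(nn.Linear(4, 4) for _ in range(12))
    student = pick_alternating(teacher, 3)
    assert len(student) == 3  # layers 0, 4, 8 of the teacher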
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
def UpperCAmelCase ( self ) -> str:
_A = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(_lowercase , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_lowercase ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
_A = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
] , )
@require_tf
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(_lowercase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
_A = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
] , )
@slow
@require_torch
def UpperCAmelCase ( self ) -> int:
_A = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(_lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
_A = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(_lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
_A = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
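# Illustrative only: the user-facing call the tests above cover. The image path is hypothetical.
if __name__ == "__main__":
    from PIL import Image
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    image = Image.open("two_cats.png")  # hypothetical local file
    preds = classifier(image, candidate_labels=["cat", "plane", "remote"])
    print(preds)  # e.g. [{'score': ..., 'label': 'remote'}, ...]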
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
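# Illustrative only: the temperature-scaled distillation term this script optimizes
# (weighted by --alpha_ce), in the standard Hinton et al. (2015) form. This is a
# sketch, not the Distiller class's exact code.
#
#     import torch.nn.functional as F
#
#     def distillation_loss(student_logits, teacher_logits, temperature=2.0):
#         s = F.log_softmax(student_logits / temperature, dim=-1)
#         t = F.softmax(teacher_logits / temperature, dim=-1)
#         # T^2 keeps gradient magnitudes comparable across temperatures.
#         return F.kl_div(s, t, reduction="batchmean") * temperature**2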
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
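# Illustrative only: the core LocalSGD pattern this example builds up to (the full
# training loop appears later in the original script, past the end of this excerpt).
# Assumes `model`, `optimizer`, `train_dataloader`, `accelerator`, and
# `local_sgd_steps` as defined below:
#
#     with LocalSGD(accelerator=accelerator, model=model,
#                   local_sgd_steps=local_sgd_steps, enabled=True) as local_sgd:
#         for batch in train_dataloader:
#             with accelerator.accumulate(model):
#                 loss = model(**batch).loss
#                 accelerator.backward(loss)
#                 optimizer.step()
#                 optimizer.zero_grad()
#             local_sgd.step()  # parameters sync every `local_sgd_steps` optimizer steps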
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def _A ( __snake_case :Accelerator , __snake_case :int = 16 ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" )
__SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case :str ):
# max_length=None => use the model max length (it's actually the default)
__SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case :Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
__SCREAMING_SNAKE_CASE = 8
else:
__SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
__snake_case , padding="longest" , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors="pt" , )
# Instantiate dataloaders.
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
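# Why pad to a multiple of 8/16 (illustrative helper, not used by the script):
# mixed-precision kernels are most efficient when the sequence length is a
# multiple of 8 (fp16/bf16) or 16 (fp8); this mirrors what `pad_to_multiple_of`
# computes inside `tokenizer.pad`.
def _round_up_to_multiple(length, multiple):
    return ((length + multiple - 1) // multiple) * multiple
# e.g. _round_up_to_multiple(37, 8) -> 40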
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case : List[str] = mocked_dataloaders # noqa: F811
def _A ( __snake_case :Optional[Any] , __snake_case :str ) -> Optional[int]:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , __snake_case ) == "1":
__SCREAMING_SNAKE_CASE = 2
# New Code #
__SCREAMING_SNAKE_CASE = int(args.gradient_accumulation_steps )
__SCREAMING_SNAKE_CASE = int(args.local_sgd_steps )
# Initialize accelerator
__SCREAMING_SNAKE_CASE = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__snake_case )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__SCREAMING_SNAKE_CASE = config["lr"]
__SCREAMING_SNAKE_CASE = int(config["num_epochs"] )
__SCREAMING_SNAKE_CASE = int(config["seed"] )
__SCREAMING_SNAKE_CASE = int(config["batch_size"] )
__SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" )
set_seed(__snake_case )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(__snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Instantiate optimizer
__SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=__snake_case )
# Instantiate scheduler
__SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
with LocalSGD(
accelerator=__snake_case , model=__snake_case , local_sgd_steps=__snake_case , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                    # We also do not currently support TPUs here, and we advise against it, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__snake_case ):
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = output.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
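# `local_sgd.step()` counts optimizer steps and, once every `local_sgd_steps`
# of them, synchronizes (averages) the model parameters across workers.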
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
__SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __snake_case )
def _A ( ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=__snake_case , default=__snake_case , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=__snake_case , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=__snake_case , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
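# Example launch (illustrative; the script name and the accelerate config are
# assumptions, not part of this file):
#   accelerate launch local_sgd_example.py --local_sgd_steps 8 --gradient_accumulation_steps 2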
if __name__ == "__main__":
main()
| 214 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =LEDConfig
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ ="""gelu"""
def __init__( self, _a, _a=13, _a=7, _a=True, _a=False, _a=99, _a=32, _a=2, _a=4, _a=37, _a=0.1, _a=0.1, _a=20, _a=2, _a=1, _a=0, _a=4, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = bos_token_id
__SCREAMING_SNAKE_CASE = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__SCREAMING_SNAKE_CASE = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__SCREAMING_SNAKE_CASE = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
__SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
__SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor], axis=1 )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
__SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(_a, _a, _a )
__SCREAMING_SNAKE_CASE = tf.concat(
[tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]], axis=-1, )
__SCREAMING_SNAKE_CASE = global_attention_mask
return config, inputs_dict
def __lowerCAmelCase ( self, _a, _a ) -> List[str]:
__SCREAMING_SNAKE_CASE = TFLEDModel(config=_a ).get_decoder()
__SCREAMING_SNAKE_CASE = inputs_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids[:1, :]
__SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :]
__SCREAMING_SNAKE_CASE = 1
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, use_cache=_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), config.vocab_size )
        __SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens], axis=-1 )
__SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask], axis=-1 )
__SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a )[0]
__SCREAMING_SNAKE_CASE = model(_a, attention_mask=_a, past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
__SCREAMING_SNAKE_CASE = int(ids_tensor((1,), output_from_past.shape[-1] ) )
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
__SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a, _a, rtol=1E-3 )
def _A ( __snake_case :Any , __snake_case :Dict , __snake_case :List[Any] , __snake_case :List[Any]=None , __snake_case :Optional[Any]=None , __snake_case :Any=None , __snake_case :List[str]=None , ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
        __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
__SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
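# Background note (added for clarity): LED reuses Longformer-style local+global
# attention, so the model also consumes a `global_attention_mask` (ones at the
# globally attended positions); it is built separately in
# `prepare_config_and_inputs` above rather than in this helper.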
@require_tf
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = TFLEDModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=_a )
def __lowerCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = tf.zeros_like(inputs_dict["attention_mask"] )
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.model_tester.seq_length
__SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_a ):
__SCREAMING_SNAKE_CASE = outputs.decoder_attentions
self.assertEqual(len(_a ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], )
def check_encoder_attentions_output(_a ):
__SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_attentions]
__SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_a ), self.model_tester.num_hidden_layers )
self.assertEqual(len(_a ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
__SCREAMING_SNAKE_CASE = len(_a )
self.assertEqual(config.output_hidden_states, _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
self.assertEqual(config.output_hidden_states, _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
self.assertEqual(config.output_hidden_states, _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(_a )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_a, _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(_a ) )
self.assertEqual(model.config.output_hidden_states, _a )
check_encoder_attentions_output(_a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __lowerCAmelCase ( self ) -> Tuple:
pass
def __lowerCAmelCase ( self ) -> Optional[int]:
        # TODO: Head-masking not yet implemented
pass
def _A ( __snake_case :Optional[int] ) -> List[Any]:
"""simple docstring"""
    return tf.constant(__snake_case , dtype=tf.int32 )
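# `_long_tensor` wraps nested Python lists as an integer tf.Tensor; the
# integration tests below use it to build the 512- and 128-token input ids.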
_snake_case : int = 1e-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
__SCREAMING_SNAKE_CASE = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config, _a, _a )
__SCREAMING_SNAKE_CASE = model(**_a )[0]
__SCREAMING_SNAKE_CASE = (1, 10_24, 7_68)
self.assertEqual(output.shape, _a )
# change to expected output here
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
tf.debugging.assert_near(output[:, :3, :3], _a, atol=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
__SCREAMING_SNAKE_CASE = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config, _a, _a )
__SCREAMING_SNAKE_CASE = model(**_a )[0]
__SCREAMING_SNAKE_CASE = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape, _a )
# change to expected output here
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
tf.debugging.assert_near(output[:, :3, :3], _a, atol=1E-3, rtol=1E-3 )
| 214 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = (UnCLIPScheduler,)
def UpperCAmelCase ( self : Optional[Any] ,**_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_snake_case )
return config
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_snake_case )
def UpperCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_snake_case )
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_snake_case )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_snake_case )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_snake_case ,prev_timestep=_snake_case )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Tuple = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowercase__ : List[Any] = scheduler_class(**_snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config(variance_type='''learned_range''' )
lowercase__ : Optional[Any] = scheduler_class(**_snake_case )
lowercase__ : str = 0.5
assert scheduler._get_variance(1 ,predicted_variance=_snake_case ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 ,predicted_variance=_snake_case ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 ,predicted_variance=_snake_case ) - -0.001_0011 < 1e-5
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : List[str] = scheduler_class(**_snake_case )
lowercase__ : Optional[Any] = scheduler.timesteps
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : int = self.dummy_sample_deter
lowercase__ : List[str] = torch.manual_seed(0 )
for i, t in enumerate(_snake_case ):
# 1. predict noise residual
lowercase__ : Optional[Any] = model(_snake_case ,_snake_case )
# 2. predict previous mean of sample x_t-1
lowercase__ : Union[str, Any] = scheduler.step(_snake_case ,_snake_case ,_snake_case ,generator=_snake_case ).prev_sample
lowercase__ : str = pred_prev_sample
lowercase__ : Any = torch.sum(torch.abs(_snake_case ) )
lowercase__ : str = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**_snake_case )
scheduler.set_timesteps(25 )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Any = self.dummy_model()
lowercase__ : Optional[int] = self.dummy_sample_deter
lowercase__ : str = torch.manual_seed(0 )
for i, t in enumerate(_snake_case ):
# 1. predict noise residual
lowercase__ : Optional[int] = model(_snake_case ,_snake_case )
if i + 1 == timesteps.shape[0]:
lowercase__ : str = None
else:
lowercase__ : Tuple = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase__ : Dict = scheduler.step(
_snake_case ,_snake_case ,_snake_case ,prev_timestep=_snake_case ,generator=_snake_case ).prev_sample
lowercase__ : Optional[Any] = pred_prev_sample
lowercase__ : Tuple = torch.sum(torch.abs(_snake_case ) )
lowercase__ : Optional[int] = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
pass
| 560 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = "efficientnet"
def __init__( self : Optional[Any] ,_snake_case : int = 3 ,_snake_case : int = 600 ,_snake_case : float = 2.0 ,_snake_case : float = 3.1 ,_snake_case : int = 8 ,_snake_case : List[int] = [3, 3, 5, 3, 5, 5, 3] ,_snake_case : List[int] = [32, 16, 24, 40, 80, 112, 192] ,_snake_case : List[int] = [16, 24, 40, 80, 112, 192, 320] ,_snake_case : List[int] = [] ,_snake_case : List[int] = [1, 2, 2, 2, 1, 2, 1] ,_snake_case : List[int] = [1, 2, 2, 3, 3, 4, 1] ,_snake_case : List[int] = [1, 6, 6, 6, 6, 6, 6] ,_snake_case : float = 0.25 ,_snake_case : str = "swish" ,_snake_case : int = 2_560 ,_snake_case : str = "mean" ,_snake_case : float = 0.02 ,_snake_case : float = 0.001 ,_snake_case : float = 0.99 ,_snake_case : float = 0.5 ,_snake_case : float = 0.2 ,**_snake_case : List[str] ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Optional[Any] = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = width_coefficient
lowercase__ : Dict = depth_coefficient
lowercase__ : Optional[int] = depth_divisor
lowercase__ : Optional[int] = kernel_sizes
lowercase__ : str = in_channels
lowercase__ : Any = out_channels
lowercase__ : Union[str, Any] = depthwise_padding
lowercase__ : str = strides
lowercase__ : List[str] = num_block_repeats
lowercase__ : List[str] = expand_ratios
lowercase__ : List[str] = squeeze_expansion_ratio
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Any = hidden_dim
lowercase__ : Optional[int] = pooling_type
lowercase__ : List[str] = initializer_range
lowercase__ : List[Any] = batch_norm_eps
lowercase__ : List[Any] = batch_norm_momentum
lowercase__ : Tuple = dropout_rate
lowercase__ : Tuple = drop_connect_rate
lowercase__ : Union[str, Any] = sum(_snake_case ) * 4
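# Note (illustrative): `num_hidden_layers` is derived from the block repeats,
# i.e. sum(num_block_repeats) * 4 = (1+2+2+3+3+4+1) * 4 = 64 for the defaults.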
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = version.parse("1.11" )
@property
def UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase ( self : Optional[int] ) -> float:
"""simple docstring"""
return 1e-5
| 560 | 1 |
import argparse
from collections import defaultdict
import yaml
_UpperCamelCase : str = """docs/source/en/_toctree.yml"""
def __UpperCamelCase ( snake_case ) -> Any:
'''simple docstring'''
__A = defaultdict(snake_case )
__A = []
__A = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(snake_case )
__A = new_doc_list
__A = [key for key, value in counts.items() if value > 1]
__A = []
for duplicate_key in duplicates:
__A = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(snake_case ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
__A = sorted(snake_case , key=lambda snake_case : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(snake_case ) > 1:
        raise ValueError(F'''{snake_case} has more than one \'overview\' doc, which is not allowed.''' )
overview_doc.extend(snake_case )
# Sort
return overview_doc
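# Intended behaviour of `clean_doc_toc` (illustrative, hypothetical data):
# duplicates are merged, entries are sorted by title, and "overview" is moved
# to the front, e.g.
#   [{"local": "b", "title": "Beta"},
#    {"local": "ov", "title": "Overview"},
#    {"local": "a", "title": "Alpha"}]
# becomes
#   [{"local": "ov", "title": "Overview"},
#    {"local": "a", "title": "Alpha"},
#    {"local": "b", "title": "Beta"}]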
def __UpperCamelCase ( snake_case=False ) -> List[str]:
'''simple docstring'''
with open(snake_case , encoding='''utf-8''' ) as f:
__A = yaml.safe_load(f.read() )
# Get to the API doc
__A = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__A = content[api_idx]['''sections''']
# Then to the model doc
__A = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__A = api_doc[scheduler_idx]['''sections''']
__A = clean_doc_toc(snake_case )
__A = False
if new_scheduler_doc != scheduler_doc:
__A = True
if overwrite:
__A = new_scheduler_doc
if diff:
if overwrite:
__A = api_doc
with open(snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
raise ValueError(
                '''The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def __UpperCamelCase ( snake_case=False ) -> str:
'''simple docstring'''
with open(snake_case , encoding='''utf-8''' ) as f:
__A = yaml.safe_load(f.read() )
# Get to the API doc
__A = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__A = content[api_idx]['''sections''']
# Then to the model doc
__A = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__A = False
__A = api_doc[pipeline_idx]['''sections''']
__A = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__A = pipeline_doc['''section''']
__A = clean_doc_toc(snake_case )
if overwrite:
__A = new_sub_pipeline_doc
new_pipeline_docs.append(snake_case )
# sort overall pipeline doc
__A = clean_doc_toc(snake_case )
if new_pipeline_docs != pipeline_docs:
__A = True
if overwrite:
__A = new_pipeline_docs
if diff:
if overwrite:
__A = api_doc
with open(snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
raise ValueError(
                '''The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_UpperCamelCase : List[str] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 341 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase ( snake_case ) -> Dict:
'''simple docstring'''
__A = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __UpperCamelCase ( snake_case , snake_case ) -> List[str]:
'''simple docstring'''
__A = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __UpperCamelCase ( snake_case ) -> Any:
'''simple docstring'''
__A = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def __UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
__A = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
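# Note (added for clarity): each helper above yields (huggingface_key,
# original_key) string pairs; the conversion loop below reads each tensor from
# the original checkpoint key and stores it under the Hugging Face key.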
def __UpperCamelCase ( snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
'''simple docstring'''
__A = '''imagenet-1k-id2label.json'''
__A = 1_0_0_0
__A = '''huggingface/label-files'''
__A = num_labels
__A = json.load(open(cached_download(hf_hub_url(snake_case , snake_case , repo_type='''dataset''' ) ) , '''r''' ) )
__A = {int(snake_case ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
    __A = CvtConfig(num_labels=snake_case , id2label=snake_case , label2id=snake_case )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__A = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__A = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__A = [2, 2, 2_0]
__A = [3, 1_2, 1_6]
__A = [1_9_2, 7_6_8, 1_0_2_4]
__A = CvtForImageClassification(snake_case )
__A = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__A = image_size
__A = torch.load(snake_case , map_location=torch.device('''cpu''' ) )
__A = OrderedDict()
__A = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__A = list_of_state_dict + cls_token(snake_case )
__A = list_of_state_dict + embeddings(snake_case )
for cnt in range(config.depth[idx] ):
__A = list_of_state_dict + attention(snake_case , snake_case )
__A = list_of_state_dict + final()
for gg in list_of_state_dict:
print(snake_case )
for i in range(len(snake_case ) ):
__A = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(snake_case )
model.save_pretrained(snake_case )
image_processor.save_pretrained(snake_case )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_8_4,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCamelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 341 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
a_ = pd.read_csv('sample_data.csv', header=None)
a_ = df.shape[:1][0]
    # If you're using some other dataset, change the target column here
a_ = df.iloc[:, 1:2]
a_ = actual_data.values.reshape(len_data, 1)
a_ = MinMaxScaler().fit_transform(actual_data)
a_ = 1_0
a_ = 5
a_ = 2_0
a_ = len_data - periods * look_back
a_ = actual_data[:division]
a_ = actual_data[division - look_back :]
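# The test slice starts `look_back` rows early so that the first test window
# already has a full history to condition on.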
a_ , a_ = [], []
a_ , a_ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
a_ = np.array(train_x)
a_ = np.array(test_x)
a_ = np.array([list(i.ravel()) for i in train_y])
a_ = np.array([list(i.ravel()) for i in test_y])
a_ = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
a_ = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
a_ = model.predict(x_test)
| 76 |
"""simple docstring"""
from math import sqrt
def lowerCamelCase_ ( __lowerCAmelCase ) -> bool:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
lowerCamelCase__ =True
    # 0 and 1 are not primes.
if number <= 1:
lowerCamelCase__ =False
for divisor in range(2 , int(round(sqrt(__lowerCAmelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCamelCase__ =False
break
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'status' must been from type bool"
return status
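# Intended behaviour of the primality check above (illustrative):
#   is_prime(2) -> True, is_prime(15) -> False, is_prime(97) -> True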
def lowerCamelCase_ ( __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCamelCase__ =list(range(2 , n + 1 ) )
lowerCamelCase__ =[] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__lowerCAmelCase ) ):
for j in range(i + 1 , len(__lowerCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCamelCase__ =0
# filters actual prime numbers.
lowerCamelCase__ =[x for x in begin_list if x != 0]
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
lowerCamelCase__ =[]
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__lowerCAmelCase ):
ans.append(__lowerCAmelCase )
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and number >= 0, "'number' must been an int and >= 0"
lowerCamelCase__ =[] # this list will be returns of the function.
# potential prime number factors.
lowerCamelCase__ =2
lowerCamelCase__ =number
if number == 0 or number == 1:
ans.append(__lowerCAmelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__lowerCAmelCase ):
while quotient != 1:
if is_prime(__lowerCAmelCase ) and (quotient % factor == 0):
ans.append(__lowerCAmelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(__lowerCAmelCase )
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'ans' must been from type list"
return ans
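# Intended behaviour (illustrative): prime_factorization(60) -> [2, 2, 3, 5];
# for 0, 1, or a prime input, the number itself is returned as a one-element list.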
def lowerCamelCase_ ( __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ =0
# prime factorization of 'number'
lowerCamelCase__ =prime_factorization(__lowerCAmelCase )
lowerCamelCase__ =max(__lowerCAmelCase )
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ =0
# prime factorization of 'number'
lowerCamelCase__ =prime_factorization(__lowerCAmelCase )
lowerCamelCase__ =min(__lowerCAmelCase )
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , __lowerCAmelCase ), "compare bust been from type bool"
return number % 2 == 0
def lowerCamelCase_ ( __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , __lowerCAmelCase ), "compare bust been from type bool"
return number % 2 != 0
def lowerCamelCase_ ( __lowerCAmelCase ) -> int:
'''simple docstring'''
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (number > 2) and is_even(__lowerCAmelCase )
), "'number' must been an int, even and > 2"
lowerCamelCase__ =[] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCamelCase__ =get_prime_numbers(__lowerCAmelCase )
lowerCamelCase__ =len(__lowerCAmelCase )
# run variable for while-loops.
lowerCamelCase__ =0
lowerCamelCase__ =None
# exit variable. for break up the loops
lowerCamelCase__ =True
while i < len_pn and loop:
lowerCamelCase__ =i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCamelCase__ =False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and (len(__lowerCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
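# Intended behaviour (illustrative): goldbach(28) -> [5, 23], i.e. two primes
# that sum to the even input.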
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and isinstance(__lowerCAmelCase , __lowerCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCamelCase__ =0
while numbera != 0:
lowerCamelCase__ =numbera % numbera
lowerCamelCase__ =numbera
lowerCamelCase__ =rest
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
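# A readable sketch of the Euclidean algorithm the function above implements
# (added for illustration; the obfuscated names above collide with each other):
def _gcd_sketch(a: int, b: int) -> int:
    while b != 0:
        a, b = b, a % b
    return a
# _gcd_sketch(24, 36) -> 12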
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and isinstance(__lowerCAmelCase , __lowerCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCamelCase__ =1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCamelCase__ =prime_factorization(__lowerCAmelCase )
lowerCamelCase__ =prime_factorization(__lowerCAmelCase )
elif numbera == 1 or numbera == 1:
lowerCamelCase__ =[]
lowerCamelCase__ =[]
lowerCamelCase__ =max(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ =0
lowerCamelCase__ =0
lowerCamelCase__ =[] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCamelCase__ =prime_fac_a.count(__lowerCAmelCase )
lowerCamelCase__ =prime_fac_a.count(__lowerCAmelCase )
for _ in range(max(__lowerCAmelCase , __lowerCAmelCase ) ):
ans *= n
else:
lowerCamelCase__ =prime_fac_a.count(__lowerCAmelCase )
for _ in range(__lowerCAmelCase ):
ans *= n
done.append(__lowerCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCamelCase__ =prime_fac_a.count(__lowerCAmelCase )
for _ in range(__lowerCAmelCase ):
ans *= n
done.append(__lowerCAmelCase )
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (n >= 0), "'number' must been a positive int"
lowerCamelCase__ =0
lowerCamelCase__ =2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__lowerCAmelCase ):
ans += 1
# precondition
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and is_prime(
__lowerCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
assert (
is_prime(__lowerCAmelCase ) and is_prime(__lowerCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCamelCase__ =p_number_a + 1 # jump to the next number
lowerCamelCase__ =[] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__lowerCAmelCase ):
number += 1
while number < p_number_a:
ans.append(__lowerCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(__lowerCAmelCase ):
number += 1
# precondition
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and ans[0] != p_number_a
and ans[len(__lowerCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> int:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
lowerCamelCase__ =[] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__lowerCAmelCase )
# precondition
assert ans[0] == 1 and ans[len(__lowerCAmelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCamelCase__ =get_divisors(__lowerCAmelCase )
# precondition
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and (divisors[0] == 1)
and (divisors[len(__lowerCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
'''simple docstring'''
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and isinstance(__lowerCAmelCase , __lowerCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCamelCase__ =gcd(abs(__lowerCAmelCase ) , abs(__lowerCAmelCase ) )
# precondition
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase_ ( __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
lowerCamelCase__ =1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCamelCase_ ( __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
lowerCamelCase__ =0
lowerCamelCase__ =1
lowerCamelCase__ =1 # this will be return
for _ in range(n - 1 ):
lowerCamelCase__ =ans
ans += fiba
lowerCamelCase__ =tmp
return ans
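# A readable sketch of the Fibonacci routine above (added for illustration):
def _fib_sketch(n: int) -> int:
    previous, ans = 1, 1
    for _ in range(n - 1):
        previous, ans = ans, ans + previous
    return ans
# _fib_sketch(1) -> 1, _fib_sketch(7) -> 21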
| 530 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''distilbert'''
lowercase__ = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self : Optional[int] , UpperCamelCase__ : int=30522 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[Any]=6 , UpperCamelCase__ : List[Any]=12 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Tuple=4 * 768 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[Any]=0.2 , UpperCamelCase__ : str=0 , **UpperCamelCase__ : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =vocab_size
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =sinusoidal_pos_embds
__UpperCamelCase =n_layers
__UpperCamelCase =n_heads
__UpperCamelCase =dim
__UpperCamelCase =hidden_dim
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation
__UpperCamelCase =initializer_range
__UpperCamelCase =qa_dropout
__UpperCamelCase =seq_classif_dropout
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase )
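# Usage note (illustrative): via `attribute_map` above, the standard config
# names alias DistilBERT's own attributes, e.g. `config.hidden_size` reads
# `config.dim` and `config.num_hidden_layers` reads `config.n_layers`.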
class _lowercase ( __a ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCamelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 707 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
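

# --- Editor's addition: an illustrative driving sketch for the processor above
# (not executed here; it downloads a checkpoint). The repo id is an assumption;
# any Chinese-CLIP repo with a BERT tokenizer and a ChineseCLIPImageProcessor
# should behave the same way.
#
#   from PIL import Image
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
#   # -> dict with input_ids, attention_mask and pixel_values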
| 296 | 0 |
"""simple docstring"""
a :Tuple = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
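
    # --- Editor's addition: round-trip sanity check against the standard
    # library's encoder; `sample` is an arbitrary payload.
    import base64

    sample = b"Hello, World!"
    assert base64_encode(sample) == base64.b64encode(sample)
    assert base64_decode(base64_encode(sample)) == sample
    print(base64_encode(sample))  # b'SGVsbG8sIFdvcmxkIQ=='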
| 680 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # The fused qkv matrix is split into separate query/key/value tensors.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
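
# --- Editor's addition: illustrative invocation. The script file name is
# hypothetical, and the "large" URL follows the pattern of the default
# checkpoint URL above (verify before use).
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large.pth \
#       --pytorch_dump_folder_path ./vit-mae-large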
| 97 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
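

# --- Editor's addition: a minimal sketch of what the template's column mapping
# yields; the dataset column names are illustrative.
#
#   template = Summarization(text_column="article", summary_column="highlights")
#   template.column_mapping  # {"article": "text", "highlights": "summary"}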
| 704 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
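
# --- Editor's addition: illustrative invocation (script name and paths are
# placeholders).
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch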
| 78 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 89 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
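
    # --- Editor's addition: shortest E -> F distance in the demo graphs above;
    # E -> G -> F costs 3 and beats E -> B -> C -> D -> F, which costs 4.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3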
| 94 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 717 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
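

# --- Editor's addition: a hypothetical toy subclass showing the contract the
# abstract reader above imposes (real readers, e.g. the CSV/JSON ones, follow
# the same shape but build the dataset from `self.path_or_paths`).
#
#   class InMemoryReader(AbstractDatasetReader):
#       def read(self):
#           return Dataset.from_dict({"text": ["hello", "world"]})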
| 447 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
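
# --- Editor's addition: with the lazy module installed in `sys.modules`, the
# framework-specific code is only imported on first attribute access, e.g.:
#
#   from transformers import MT5ForConditionalGeneration  # resolves the torch path lazily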
| 532 |
"""simple docstring"""
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 532 | 1 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
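
# --- Editor's addition: illustrative use of these pipelines through the
# high-level factory (not executed here; it downloads the task's default
# checkpoint).
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("Long article text ...", max_length=56)[0]["summary_text"]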
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True
    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 719 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 19 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
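# Note (added for illustration): each bbox above is (x0, y0, x1, y1) on the
# 0-1000 normalized scale that LayoutLM expects for its 2D position embeddings.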
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 80 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 129 | 0 |
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of ``g`` using Tarjan's algorithm."""
    n = len(g)
    stack: deque = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for ``n`` vertices from a list of directed edges."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
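    # Extra check (added for illustration): on a DAG every vertex is its own SCC,
    # and Tarjan emits components in reverse topological order of the condensation.
    assert [[2], [1], [0]] == tarjan(create_graph(3, [(0, 1), (1, 2)]))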
| 709 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
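# Usage note (added for illustration): instantiating DeformableDetrFeatureExtractor
# now emits a FutureWarning but otherwise behaves exactly like
# DeformableDetrImageProcessor, since it only subclasses it.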
| 116 | 0 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_24,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_40,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 14_08,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_60,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 15_36,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_00,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 17_92,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_80,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 20_48,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_56,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 23_04,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_28,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 25_60,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_00,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
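# Example (added for illustration): get_efficientnet_config("b0") returns a config
# with hidden_dim=1280 and image_size=224, read straight from CONFIG_MAP above.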
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
_lowerCAmelCase = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
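# Layout note (added for illustration): TF stores conv kernels as (H, W, in, out)
# while PyTorch expects (out, in, H, W), hence permute(3, 2, 0, 1) above; TF
# depthwise kernels are (H, W, channels, multiplier), hence permute(2, 3, 0, 1).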
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak model's weights to our EfficientNet structure.
    """
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 18 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
def _lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
_lowercase : int = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = "<s>"
_lowercase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCamelCase ) , 1000 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowercase : str = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : Any = "I was born in 92000, and this is falsé."
_lowercase : Dict = tokenizer.tokenize(_UpperCamelCase )
_lowercase : List[Any] = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_lowercase : int = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_lowercase : Tuple = self.get_rust_tokenizer()
_lowercase : Optional[Any] = tokenizer.encode(_UpperCamelCase )
_lowercase : Any = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self , _UpperCamelCase=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# Simple input
_lowercase : int = "This is a simple input"
_lowercase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_lowercase : str = ("This is a simple input", "This is a pair")
_lowercase : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
_lowercase : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [285, 46, 10, 170, 382] , )
_lowercase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_lowercase : Dict = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowercase : List[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = "Hello World!"
_lowercase : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_lowercase : Optional[Any] = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_lowercase : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowercase : Tuple = " ".join(_UpperCamelCase )
_lowercase : Tuple = self.big_tokenizer.encode_plus(_UpperCamelCase , return_tensors="pt" )
_lowercase : int = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_lowercase : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_lowercase : Optional[int] = encoded_sequence["input_ids"].shape
_lowercase : List[Any] = ReformerModel(_UpperCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCamelCase )
model(**_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_lowercase : Dict = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=_UpperCamelCase , sequences=_UpperCamelCase , )
| 245 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
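# Behaviour sketch (added for illustration, assuming a tokenizer that yields one
# token per whitespace-separated word): with max_tokens=3 and inputs
# ["a", "b", "c", "d"], pack_examples greedily emits ["a b c", "d"] — each
# finished example is as full as the token budget allows.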
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 327 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
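    # Worked check (added for illustration): perimeter 12 is produced only by the
    # (3, 4, 5) triangle — Euclid's formula with m=2, n=1 gives 2*m*(m+n) = 12.
    assert solution(12) == 1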
| 327 | 1 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """Count perimeters up to ``limit`` formed by exactly one right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 693 | 1 |
'''simple docstring'''
import argparse
import os
import re
_lowercase : List[Any] = "src/transformers"
# Pattern that looks at the indentation in a line.
_lowercase : str = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowercase : str = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : List[Any] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : Tuple = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : str = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
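# Behaviour note (added for illustration): a level-0 line that directly follows a
# deeper-indented run is treated as the closer of that block, so
# "a = 1\nif x:\n    b = 2\nc = 3" splits into ["a = 1", "if x:\n    b = 2\nc = 3"].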
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
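# Example (added for illustration): sort_objects(["foo", "Bar", "CONST", "_baz"])
# returns ["CONST", "Bar", "_baz", "foo"] — constants, then classes, then functions,
# with leading underscores ignored when ordering.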
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        #     key: [
        #         "object1",
        #         "object2",
        #         ...
        #     ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        #     key: [
        #         "object1", "object2", ...
        #     ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
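# Example (added for illustration):
# sort_objects_in_import('_import_structure["models"].extend(["B", "A"])')
# returns '_import_structure["models"].extend(["A", "B"])'.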
def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 30 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Return the Schur complement of the block matrix [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
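# Worked 1x1 example (added for illustration): for the block matrix [[4, 2], [2, 3]],
# the Schur complement of the (1, 1) block is 3 - 2 * (1/4) * 2 = 2, consistent with
# det([[4, 2], [2, 3]]) = 8 = det([4]) * 2.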
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # Passing c (2x2) where B belongs makes A and B disagree on row count.
        with self.assertRaises(ValueError):
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among ``num_picked`` drawn balls."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
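    # Sanity check (added for illustration): drawing all 70 balls must show every
    # colour, so the expectation equals NUM_COLOURS exactly.
    assert solution(70) == "7.000000000"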
| 204 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
SCREAMING_SNAKE_CASE_ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
SCREAMING_SNAKE_CASE_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ )['''input_ids''']
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_=6 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
SCREAMING_SNAKE_CASE_ = '''This is a simple input'''
SCREAMING_SNAKE_CASE_ = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE_ = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE_ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
SCREAMING_SNAKE_CASE_ = None # Hotfixing padding = None
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = next(iter(SCREAMING_SNAKE_CASE_ ) )['''premise'''] # pick up one data
SCREAMING_SNAKE_CASE_ = list(sample_data.values() )
SCREAMING_SNAKE_CASE_ = list(map(tokenizer.encode , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) for x in output_tokens]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 626 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__magic_name__ : str = logging.get_logger(__name__)
__magic_name__ : Optional[int] = '''Hello world! cécé herlolip'''
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) -> Any:
"""simple docstring"""
UpperCamelCase = FairseqRobertaModel.from_pretrained(lowerCAmelCase__)
roberta.eval() # disable dropout
UpperCamelCase = roberta.model.encoder.sentence_encoder
UpperCamelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , lowerCAmelCase__)
UpperCamelCase = XLMRobertaXLForSequenceClassification(lowerCAmelCase__) if classification_head else XLMRobertaXLForMaskedLM(lowerCAmelCase__)
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCamelCase = roberta_sent_encoder.embed_tokens.weight
UpperCamelCase = roberta_sent_encoder.embed_positions.weight
UpperCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c RoBERTa doesn't use them.
UpperCamelCase = roberta_sent_encoder.layer_norm.weight
UpperCamelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
UpperCamelCase = model.roberta.encoder.layer[i]
UpperCamelCase = roberta_sent_encoder.layers[i]
UpperCamelCase = layer.attention
UpperCamelCase = roberta_layer.self_attn_layer_norm.weight
UpperCamelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCamelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
)
UpperCamelCase = roberta_layer.self_attn.q_proj.weight
UpperCamelCase = roberta_layer.self_attn.q_proj.bias
UpperCamelCase = roberta_layer.self_attn.k_proj.weight
UpperCamelCase = roberta_layer.self_attn.k_proj.bias
UpperCamelCase = roberta_layer.self_attn.v_proj.weight
UpperCamelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCamelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCamelCase = roberta_layer.self_attn.out_proj.weight
UpperCamelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCamelCase = roberta_layer.final_layer_norm.weight
UpperCamelCase = roberta_layer.final_layer_norm.bias
# intermediate
UpperCamelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCamelCase = roberta_layer.fca.weight
UpperCamelCase = roberta_layer.fca.bias
# output
UpperCamelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCamelCase = roberta_layer.fca.weight
UpperCamelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
UpperCamelCase = roberta.model.classification_heads['mnli'].dense.weight
UpperCamelCase = roberta.model.classification_heads['mnli'].dense.bias
UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight
UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
UpperCamelCase = roberta.model.encoder.lm_head.dense.weight
UpperCamelCase = roberta.model.encoder.lm_head.dense.bias
UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.weight
UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.bias
UpperCamelCase = roberta.model.encoder.lm_head.weight
UpperCamelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCamelCase = roberta.encode(lowerCAmelCase__).unsqueeze(0) # batch of size 1
UpperCamelCase = model(lowerCAmelCase__)[0]
if classification_head:
UpperCamelCase = roberta.model.classification_heads['mnli'](roberta.extract_features(lowerCAmelCase__))
else:
UpperCamelCase = roberta.model(lowerCAmelCase__)[0]
print(our_output.shape , their_output.shape)
UpperCamelCase = torch.max(torch.abs(our_output - their_output)).item()
print(F'max_absolute_diff = {max_absolute_diff}') # ~ 1e-7
UpperCamelCase = torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3)
print('Do both models output the same tensors?' , '🔥' if success else '💩')
if not success:
raise Exception('Something went wRoNg')
pathlib.Path(lowerCAmelCase__).mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
print(F'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCAmelCase__)
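# Added sanity-check sketch (hedged, not part of the original script): after the save
# above, the converted weights should round-trip through from_pretrained:
#
#     reloaded = XLMRobertaXLForMaskedLM.from_pretrained(pytorch_dump_folder_path)
#     reloaded.eval()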
if __name__ == "__main__":
__magic_name__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__magic_name__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 709 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__magic_name__ : Union[str, Any] = get_logger()
__magic_name__ : Optional[dict] = None
class A__ ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
'''simple docstring'''
def __init__( self : Dict , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
super().__init__(features=_SCREAMING_SNAKE_CASE )
import jax
from jaxlib.xla_client import Device
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Expected {device} to be a `str` not {type(_SCREAMING_SNAKE_CASE )}, as `jaxlib.xla_extension.Device` '
                'is not serializable with either `pickle` or `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
UpperCamelCase = device if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'Device with string identifier {self.device} not listed among the available '
f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
f'device: {str(jax.devices()[0] )}.' )
UpperCamelCase = str(jax.devices()[0] )
UpperCamelCase = jnp_array_kwargs
@staticmethod
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
import jax
return {str(_SCREAMING_SNAKE_CASE ): device for device in jax.devices()}
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(_SCREAMING_SNAKE_CASE , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_SCREAMING_SNAKE_CASE , axis=0 )
return column
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , (str, bytes, type(_SCREAMING_SNAKE_CASE )) ):
return value
elif isinstance(_SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCamelCase = {}
if isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
UpperCamelCase = {'dtype': jnp.intaa}
else:
UpperCamelCase = {'dtype': jnp.intaa}
elif isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCamelCase = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_SCREAMING_SNAKE_CASE , '__array__' ) and not isinstance(_SCREAMING_SNAKE_CASE , jax.Array ):
UpperCamelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _SCREAMING_SNAKE_CASE , map_list=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Dict , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_row(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_row(_SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_column(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_column(_SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._consolidate(_SCREAMING_SNAKE_CASE )
return column
def _SCREAMING_SNAKE_CASE ( self : List[str] , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
for column_name in batch:
UpperCamelCase = self._consolidate(batch[column_name] )
return batch
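# Hedged usage sketch (added): this class backs the "jax" output format of `datasets`.
# Assuming a loaded `Dataset` named `ds`, something like
#
#     ds = ds.with_format("jax", device=str(jax.devices()[0]))
#     batch = ds[:2]   # column values come back as jax.Array
#
# routes row/column/batch extraction through the recursive_tensorize methods above.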
| 410 | 0 |
import numpy as np
import qiskit
def snake_case( __magic_name__ = 8 , __magic_name__ = None ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = np.random.default_rng(seed=__magic_name__ )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
lowercase : List[str] = 6 * key_len
# Measurement basis for Alice's qubits.
lowercase : str = rng.integers(2 , size=__magic_name__ )
# The set of states Alice will prepare.
lowercase : Dict = rng.integers(2 , size=__magic_name__ )
# Measurement basis for Bob's qubits.
lowercase : int = rng.integers(2 , size=__magic_name__ )
# Quantum Circuit to simulate BB84
lowercase : Optional[int] = qiskit.QuantumCircuit(__magic_name__ , name='''BB84''' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(__magic_name__ ):
if alice_state[index] == 1:
bbaa_circ.x(__magic_name__ )
if alice_basis[index] == 1:
bbaa_circ.h(__magic_name__ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(__magic_name__ ):
if bob_basis[index] == 1:
bbaa_circ.h(__magic_name__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
lowercase : Union[str, Any] = qiskit.Aer.get_backend('''aer_simulator''' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
lowercase : int = qiskit.execute(__magic_name__ , __magic_name__ , shots=1 , seed_simulator=__magic_name__ )
# Returns the result of measurement.
lowercase : str = job.result().get_counts(__magic_name__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
lowercase : Union[str, Any] = ''''''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
__magic_name__ , __magic_name__ , __magic_name__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
lowercase : str = gen_key[:key_len] if len(__magic_name__ ) >= key_len else gen_key.ljust(__magic_name__ , '''0''' )
return key
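# Added note: Alice and Bob pick the same basis for each qubit with probability 1/2,
# so roughly half of the measured bits survive the sifting step above. With
# 6 * key_len qubits, running short is very unlikely; a hedged back-of-the-envelope
# check for key_len = 8 (48 qubits), using only the standard library:
#
#     from math import comb
#     p_short = sum(comb(48, k) for k in range(8)) / 2**48   # ~3e-7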
if __name__ == "__main__":
print(f'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
    testmod()
| 217 |
from __future__ import annotations
def snake_case( __magic_name__ , __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : list[list[int]] = []
lowercase : list[int] = []
lowercase : List[str] = 0
lowercase : Any = sum(__magic_name__ )
create_state_space_tree(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
return result
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> None:
'''simple docstring'''
if sum(__magic_name__ ) > max_sum or (remaining_nums_sum + sum(__magic_name__ )) < max_sum:
return
if sum(__magic_name__ ) == max_sum:
result.append(__magic_name__ )
return
for index in range(__magic_name__ , len(__magic_name__ ) ):
create_state_space_tree(
__magic_name__ , __magic_name__ , index + 1 , [*path, nums[index]] , __magic_name__ , remaining_nums_sum - nums[index] , )
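# Hedged cross-check (added): the backtracking above should agree with a brute-force
# enumeration over all subsets of the driver values below, e.g.:
#
#     from itertools import combinations
#     expected = [list(c) for r in range(len(nums) + 1)
#                 for c in combinations(nums, r) if sum(c) == max_sum]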
lowerCAmelCase_ = [3, 34, 4, 12, 5, 2]
lowerCAmelCase_ = 9
lowerCAmelCase_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 217 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
A = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
A = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
A = '▁'
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BarthezTokenizer
def __init__( self : int , snake_case__ : Dict=None , snake_case__ : List[str]=None , snake_case__ : Tuple="<s>" , snake_case__ : str="</s>" , snake_case__ : int="</s>" , snake_case__ : List[Any]="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : Any="<pad>" , snake_case__ : Optional[Any]="<mask>" , **snake_case__ : Dict , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
_lowerCamelCase = vocab_file
_lowerCamelCase = False if not self.vocab_file else True
def _snake_case ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
        return (out_vocab_file,)
| 702 |
from itertools import product
def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : int ) -> list[int]:
_lowerCamelCase = sides_number
_lowerCamelCase = max_face_number * dice_number
_lowerCamelCase = [0] * (max_total + 1)
_lowerCamelCase = 1
_lowerCamelCase = range(UpperCamelCase , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase , repeat=UpperCamelCase ):
_lowerCamelCase = sum(UpperCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
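# Added worked example (hedged): for two four-sided dice the reference version of
# total_frequency_distribution(sides_number=4, dice_number=2) yields
# [0, 0, 1, 2, 3, 4, 3, 2, 1]; e.g. a total of 5 can be rolled 4 ways:
# (1, 4), (2, 3), (3, 2), (4, 1).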
def lowerCamelCase ( ) -> float:
_lowerCamelCase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_lowerCamelCase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_lowerCamelCase = 0
_lowerCamelCase = 9
_lowerCamelCase = 4 * 9
_lowerCamelCase = 6
for peter_total in range(UpperCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_lowerCamelCase = (4**9) * (6**6)
_lowerCamelCase = peter_wins_count / total_games_number
_lowerCamelCase = round(UpperCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 234 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = """instructblip_vision_model"""
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=1E-6 , __A=0.0 , __A=1E-10 , __A=True , **__A , ):
super().__init__(**__A )
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = patch_size
__a = image_size
__a = initializer_range
__a = attention_dropout
__a = layer_norm_eps
__a = hidden_act
__a = qkv_bias
@classmethod
def snake_case_ ( cls , __A , **__A ):
cls._set_token_in_kwargs(__A )
__a , __a = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
__a = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = """instructblip_qformer"""
def __init__( self , __A=30522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ):
super().__init__(pad_token_id=__A , **__A )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = cross_attention_frequency
__a = encoder_hidden_size
@classmethod
def snake_case_ ( cls , __A , **__A ):
cls._set_token_in_kwargs(__A )
__a , __a = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
__a = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = """instructblip"""
_lowerCamelCase = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ):
super().__init__(**__A )
if vision_config is None:
__a = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
__a = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
__a = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
__a = InstructBlipVisionConfig(**__A )
__a = InstructBlipQFormerConfig(**__A )
__a = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
__a = CONFIG_MAPPING[text_model_type](**__A )
__a = self.text_config.tie_word_embeddings
__a = self.text_config.is_encoder_decoder
__a = num_query_tokens
__a = self.vision_config.hidden_size
__a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__a = 1.0
__a = 0.02
@classmethod
def snake_case_ ( cls , __A , __A , __A , **__A , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def snake_case_ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.vision_config.to_dict()
__a = self.qformer_config.to_dict()
__a = self.text_config.to_dict()
__a = self.__class__.model_type
return output
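# Hedged usage sketch (added): the classmethod above composes the three sub-configs;
# in the upstream transformers API it is exposed as
# InstructBlipConfig.from_vision_qformer_text_configs:
#
#     vision = InstructBlipVisionConfig()
#     qformer = InstructBlipQFormerConfig()
#     text = CONFIG_MAPPING["opt"]()
#     config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)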
| 99 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 16
lowercase_ = 32
def lowerCAmelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return int(x / 2**20 )
class A__ :
def __enter__( self ) -> int:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__magic_name__ : List[Any] = torch.cuda.memory_allocated()
return self
def __exit__( self , *lowerCamelCase ) -> int:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
__magic_name__ : Optional[Any] = torch.cuda.memory_allocated()
__magic_name__ : str = torch.cuda.max_memory_allocated()
__magic_name__ : List[Any] = bamb(self.end - self.begin )
__magic_name__ : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase = 16, UpperCAmelCase = "bert-base-cased", UpperCAmelCase = 320, UpperCAmelCase = 160, ) ->Optional[Any]:
"""simple docstring"""
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase )
__magic_name__ : Optional[int] = load_dataset(
'''glue''', '''mrpc''', split={'''train''': F'''train[:{n_train}]''', '''validation''': F'''validation[:{n_val}]'''} )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ : List[Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCAmelCase, max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ : Optional[int] = datasets.map(
UpperCAmelCase, batched=UpperCAmelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ : str = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(UpperCAmelCase, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ : List[Any] = DataLoader(
tokenized_datasets['''train'''], shuffle=UpperCAmelCase, collate_fn=UpperCAmelCase, batch_size=UpperCAmelCase )
__magic_name__ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=UpperCAmelCase, collate_fn=UpperCAmelCase, batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
__magic_name__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ : int = config['''lr''']
__magic_name__ : Union[str, Any] = int(config['''num_epochs'''] )
__magic_name__ : Optional[Any] = int(config['''seed'''] )
__magic_name__ : List[Any] = int(config['''batch_size'''] )
__magic_name__ : int = args.model_name_or_path
set_seed(UpperCAmelCase )
__magic_name__ , __magic_name__ : int = get_dataloaders(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, args.n_train, args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ : Dict = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase, return_dict=UpperCAmelCase )
# Instantiate optimizer
__magic_name__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ : Tuple = optimizer_cls(params=model.parameters(), lr=UpperCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ : Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ : Optional[Any] = 1
__magic_name__ : str = (len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase, num_warmup_steps=0, num_training_steps=UpperCAmelCase, )
else:
__magic_name__ : Optional[Any] = DummyScheduler(UpperCAmelCase, total_num_steps=UpperCAmelCase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = accelerator.prepare(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
__magic_name__ : int = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ : Union[str, Any] = 0
# Now we train the model
__magic_name__ : Tuple = {}
for epoch in range(UpperCAmelCase, UpperCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(UpperCAmelCase ):
__magic_name__ : Optional[Any] = model(**UpperCAmelCase )
__magic_name__ : Any = outputs.loss
__magic_name__ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__magic_name__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, '''peak_memory_utilization.json''' ), '''w''' ) as f:
json.dump(UpperCAmelCase, UpperCAmelCase )
def lowerCAmelCase ( ) ->Tuple:
"""simple docstring"""
__magic_name__ : Any = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=UpperCAmelCase, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=UpperCAmelCase, )
parser.add_argument(
'''--output_dir''', type=UpperCAmelCase, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--peak_memory_upper_bound''', type=UpperCAmelCase, default=UpperCAmelCase, help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''', )
parser.add_argument(
'''--n_train''', type=UpperCAmelCase, default=320, help='''Number of training examples to use.''', )
parser.add_argument(
'''--n_val''', type=UpperCAmelCase, default=160, help='''Number of validation examples to use.''', )
parser.add_argument(
'''--num_epochs''', type=UpperCAmelCase, default=1, help='''Number of train epochs.''', )
__magic_name__ : str = parser.parse_args()
__magic_name__ : List[str] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase, UpperCAmelCase )
if __name__ == "__main__":
main()
| 154 | 0 |
"""simple docstring"""
import math
def lowerCamelCase_ ( _lowerCamelCase : int ):
lowerCamelCase_ = []
lowerCamelCase_ = 2
lowerCamelCase_ = int(math.sqrt(_lowerCamelCase ) ) # Size of every segment
lowerCamelCase_ = [True] * (end + 1)
lowerCamelCase_ = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowerCamelCase )
for i in range(start * start , end + 1 , _lowerCamelCase ):
lowerCamelCase_ = False
start += 1
prime += in_prime
lowerCamelCase_ = end + 1
lowerCamelCase_ = min(2 * end , _lowerCamelCase )
while low <= n:
lowerCamelCase_ = [True] * (high - low + 1)
for each in in_prime:
lowerCamelCase_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowerCamelCase , high + 1 , _lowerCamelCase ):
lowerCamelCase_ = False
for j in range(len(_lowerCamelCase ) ):
if temp[j] is True:
prime.append(j + low )
lowerCamelCase_ = high + 1
lowerCamelCase_ = min(high + end , _lowerCamelCase )
return prime
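# Hedged cross-check (added): for small inputs the reference segmented sieve agrees
# with a direct enumeration of primes, e.g.
#
#     assert sieve(50) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]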
print(sieve(1_0**6))
| 66 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__lowercase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCamelCase__ , )
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 66 | 1 |
'''simple docstring'''
from math import ceil, sqrt
def snake_case_ ( SCREAMING_SNAKE_CASE__ = 1_00_00_00 ):
'''simple docstring'''
_snake_case = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_snake_case = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_snake_case = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
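# Added derivation note: a hollow square lamina with outer width n and hole width m
# (same parity, 1 <= m <= n - 2) uses n**2 - m**2 tiles. Requiring n**2 - m**2 <= limit
# gives the hole-width lower bound ceil(sqrt(n**2 - limit)) computed above, and
# (n - m_min - 2) // 2 + 1 counts the admissible hole widths of matching parity.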
if __name__ == "__main__":
print(F'{solution() = }')
| 672 |
'''simple docstring'''
import math
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return math.pow(SCREAMING_SNAKE_CASE__ , 2 ) - a
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return 2 * x
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = 2.0
while start <= a:
_snake_case = math.pow(SCREAMING_SNAKE_CASE__ , 2 )
return start
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 99_99 , SCREAMING_SNAKE_CASE__ = 0.00000000000001 ):
'''simple docstring'''
if a < 0:
raise ValueError("math domain error" )
_snake_case = get_initial_point(SCREAMING_SNAKE_CASE__ )
for _ in range(SCREAMING_SNAKE_CASE__ ):
_snake_case = value
_snake_case = value - fx(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / fx_derivative(SCREAMING_SNAKE_CASE__ )
if abs(prev_value - value ) < tolerance:
return value
return value
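# Hedged usage sketch (added), assuming the entry point keeps its reference name and
# parameter names (square_root_iterative(a, max_iter, tolerance)):
#
#     square_root_iterative(2)                     # ~1.4142135623...
#     square_root_iterative(9, tolerance=1e-12)    # ~3.0
#
# Negative inputs raise ValueError("math domain error"), as coded above.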
if __name__ == "__main__":
from doctest import testmod
testmod()
| 672 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_lowerCAmelCase = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
 pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_lowerCAmelCase = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_lowerCAmelCase = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def lowercase ( _a ) -> Optional[Any]:
def remove_articles(_a ):
UpperCAmelCase_: int = re.compile(r"\b(a|an|the)\b" ,re.UNICODE )
return re.sub(_a ," " ,_a )
def white_space_fix(_a ):
return " ".join(text.split() )
def remove_punc(_a ):
UpperCAmelCase_: Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_a ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_a ) ) ) )
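# Added worked example: the composed normalizer above lowercases, strips punctuation,
# drops the articles a/an/the and collapses whitespace, so
#
#     normalize_answer("The  quick, brown fox!")  ==  "quick brown fox"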
def lowercase ( _a ,_a ) -> List[Any]:
return int(normalize_answer(_a ) == normalize_answer(_a ) )
def lowercase ( _a ,_a ) -> int:
UpperCAmelCase_: Optional[int] = [any(compute_exact(_a ,_a ) for ref in refs ) for pred, refs in zip(_a ,_a )]
return (sum(_a ) / len(_a )) * 100
def lowercase ( _a ,_a ,_a ,_a ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
UpperCAmelCase_: Optional[int] = Counter(_a )
UpperCAmelCase_: Dict = Counter(_a )
UpperCAmelCase_: Optional[Any] = Counter()
for sgram, scount in sgramcounter.items():
UpperCAmelCase_: Optional[Any] = scount * numref
UpperCAmelCase_: Dict = Counter(_a )
UpperCAmelCase_: Union[str, Any] = Counter()
for cgram, ccount in cgramcounter.items():
UpperCAmelCase_: str = ccount * numref
# KEEP
UpperCAmelCase_: List[str] = sgramcounter_rep & cgramcounter_rep
UpperCAmelCase_: str = keepgramcounter_rep & rgramcounter
UpperCAmelCase_: int = sgramcounter_rep & rgramcounter
UpperCAmelCase_: str = 0
UpperCAmelCase_: Optional[Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_: Tuple = 1
UpperCAmelCase_: Any = 1
if len(_a ) > 0:
UpperCAmelCase_: List[Any] = keeptmpscorea / len(_a )
if len(_a ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
UpperCAmelCase_: Optional[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
UpperCAmelCase_: List[str] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
UpperCAmelCase_: List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
UpperCAmelCase_: List[str] = sgramcounter_rep - cgramcounter_rep
UpperCAmelCase_: Union[str, Any] = delgramcounter_rep - rgramcounter
UpperCAmelCase_: List[str] = sgramcounter_rep - rgramcounter
UpperCAmelCase_: Union[str, Any] = 0
UpperCAmelCase_: List[Any] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_: Tuple = 1
if len(_a ) > 0:
UpperCAmelCase_: Optional[Any] = deltmpscorea / len(_a )
# ADDITION
UpperCAmelCase_: List[str] = set(_a ) - set(_a )
UpperCAmelCase_: Optional[Any] = set(_a ) & set(_a )
UpperCAmelCase_: Dict = set(_a ) - set(_a )
UpperCAmelCase_: Optional[int] = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_: str = 1
UpperCAmelCase_: Optional[Any] = 1
if len(_a ) > 0:
UpperCAmelCase_: str = addtmpscore / len(_a )
if len(_a ) > 0:
UpperCAmelCase_: Optional[Any] = addtmpscore / len(_a )
UpperCAmelCase_: Tuple = 0
if addscore_precision > 0 or addscore_recall > 0:
UpperCAmelCase_: Tuple = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowercase ( _a ,_a ,_a ) -> List[Any]:
UpperCAmelCase_: Dict = len(_a )
UpperCAmelCase_: int = ssent.split(" " )
UpperCAmelCase_: Union[str, Any] = csent.split(" " )
UpperCAmelCase_: Optional[Any] = []
UpperCAmelCase_: Any = []
UpperCAmelCase_: str = []
UpperCAmelCase_: Optional[int] = []
UpperCAmelCase_: Optional[int] = []
UpperCAmelCase_: Optional[int] = []
UpperCAmelCase_: Any = []
UpperCAmelCase_: List[str] = []
UpperCAmelCase_: List[Any] = []
UpperCAmelCase_: Union[str, Any] = []
for rsent in rsents:
UpperCAmelCase_: Union[str, Any] = rsent.split(" " )
UpperCAmelCase_: Any = []
UpperCAmelCase_: List[Any] = []
UpperCAmelCase_: Any = []
ragramslist.append(_a )
for i in range(0 ,len(_a ) - 1 ):
if i < len(_a ) - 1:
UpperCAmelCase_: Dict = ragrams[i] + " " + ragrams[i + 1]
ragrams.append(_a )
if i < len(_a ) - 2:
UpperCAmelCase_: Tuple = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(_a )
if i < len(_a ) - 3:
UpperCAmelCase_: str = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(_a )
ragramslist.append(_a )
ragramslist.append(_a )
ragramslist.append(_a )
for i in range(0 ,len(_a ) - 1 ):
if i < len(_a ) - 1:
UpperCAmelCase_: Optional[Any] = sagrams[i] + " " + sagrams[i + 1]
sagrams.append(_a )
if i < len(_a ) - 2:
UpperCAmelCase_: str = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(_a )
if i < len(_a ) - 3:
UpperCAmelCase_: List[Any] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(_a )
for i in range(0 ,len(_a ) - 1 ):
if i < len(_a ) - 1:
UpperCAmelCase_: List[Any] = cagrams[i] + " " + cagrams[i + 1]
cagrams.append(_a )
if i < len(_a ) - 2:
UpperCAmelCase_: Dict = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(_a )
if i < len(_a ) - 3:
UpperCAmelCase_: Optional[int] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(_a )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = SARIngram(_a ,_a ,_a ,_a )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Optional[Any] = SARIngram(_a ,_a ,_a ,_a )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = SARIngram(_a ,_a ,_a ,_a )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: int = SARIngram(_a ,_a ,_a ,_a )
UpperCAmelCase_: Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
UpperCAmelCase_: str = sum([delascore, delascore, delascore, delascore] ) / 4
UpperCAmelCase_: Any = sum([addascore, addascore, addascore, addascore] ) / 4
UpperCAmelCase_: Dict = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def lowercase ( _a ,_a = True ,_a = "13a" ,_a = True ) -> Optional[int]:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using spaces to split
    # the sentence. Even though the Wiki-Auto and TURK datasets do not
    # require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
UpperCAmelCase_: Dict = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
UpperCAmelCase_: List[str] = sacrebleu.metrics.bleu._get_tokenizer(_a )()(_a )
else:
UpperCAmelCase_: Any = sacrebleu.TOKENIZERS[tokenizer]()(_a )
elif tokenizer == "moses":
UpperCAmelCase_: Optional[Any] = sacremoses.MosesTokenizer().tokenize(_a ,return_str=_a ,escape=_a )
elif tokenizer == "penn":
UpperCAmelCase_: List[str] = sacremoses.MosesTokenizer().penn_tokenize(_a ,return_str=_a )
else:
UpperCAmelCase_: Dict = sentence
if not return_str:
UpperCAmelCase_: Optional[int] = normalized_sent.split()
return normalized_sent
def lowercase ( _a ,_a ,_a ) -> Optional[Any]:
if not (len(_a ) == len(_a ) == len(_a )):
raise ValueError("Sources length must match predictions and references lengths." )
UpperCAmelCase_: Optional[int] = 0
for src, pred, refs in zip(_a ,_a ,_a ):
sari_score += SARIsent(normalize(_a ) ,normalize(_a ) ,[normalize(_a ) for sent in refs] )
UpperCAmelCase_: Dict = sari_score / len(_a )
return 100 * sari_score
def lowercase ( _a ,_a ,_a="exp" ,_a=None ,_a=False ,_a=False ,_a=False ,) -> Optional[Any]:
UpperCAmelCase_: str = len(references[0] )
if any(len(_a ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
UpperCAmelCase_: Union[str, Any] = [[refs[i] for refs in references] for i in range(_a )]
UpperCAmelCase_: List[Any] = sacrebleu.corpus_bleu(
_a ,_a ,smooth_method=_a ,smooth_value=_a ,force=_a ,lowercase=_a ,use_effective_order=_a ,)
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def snake_case_ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def snake_case_ ( self , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = {}
result.update({"sari": compute_sari(sources=A__ , predictions=A__ , references=A__ )} )
result.update({"sacrebleu": compute_sacrebleu(predictions=A__ , references=A__ )} )
result.update({"exact": compute_em(predictions=A__ , references=A__ )} )
        return result
| 716 |
def lowercase ( _a ) -> bool:
if not isinstance(_a ,_a ):
UpperCAmelCase_: Dict = f"Input value of [number={number}] must be an integer"
raise TypeError(_a )
if number < 0:
return False
UpperCAmelCase_: Dict = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
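# Added worked examples: 5 and 76 pass the check above (5**2 = 25 ends in 5 and
# 76**2 = 5776 ends in 76), while 7 does not (49 does not end in 7); the loop compares
# the trailing digits of the number and its square one digit at a time.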
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 306 | 0 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=0 ):
_UpperCAmelCase : Any = []
for old_item in old_list:
_UpperCAmelCase : Any = old_item.replace('''in_layers.0''' , '''norm1''' )
_UpperCAmelCase : Optional[Any] = new_item.replace('''in_layers.2''' , '''conv1''' )
_UpperCAmelCase : Optional[int] = new_item.replace('''out_layers.0''' , '''norm2''' )
_UpperCAmelCase : Optional[int] = new_item.replace('''out_layers.3''' , '''conv2''' )
_UpperCAmelCase : Optional[Any] = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
_UpperCAmelCase : Dict = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
_UpperCAmelCase : Optional[Any] = shave_segments(UpperCamelCase__ , n_shave_prefix_segments=UpperCamelCase__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any]=0 ):
_UpperCAmelCase : Optional[int] = []
for old_item in old_list:
_UpperCAmelCase : Tuple = old_item
_UpperCAmelCase : int = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
_UpperCAmelCase : str = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
_UpperCAmelCase : Dict = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
_UpperCAmelCase : Optional[Any] = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
_UpperCAmelCase : int = shave_segments(UpperCamelCase__ , n_shave_prefix_segments=UpperCamelCase__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Dict=None ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_UpperCAmelCase : Optional[int] = old_checkpoint[path]
_UpperCAmelCase : List[str] = old_tensor.shape[0] // 3
_UpperCAmelCase : Optional[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_UpperCAmelCase : Dict = old_tensor.shape[0] // config['''num_head_channels'''] // 3
_UpperCAmelCase : Union[str, Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = old_tensor.split(channels // num_heads , dim=1 )
_UpperCAmelCase : str = query.reshape(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = key.reshape(UpperCamelCase__ )
_UpperCAmelCase : int = value.reshape(UpperCamelCase__ )
for path in paths:
_UpperCAmelCase : Optional[int] = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_UpperCAmelCase : Optional[Any] = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
_UpperCAmelCase : Dict = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
_UpperCAmelCase : Optional[Any] = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
_UpperCAmelCase : Optional[int] = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_UpperCAmelCase : Optional[int] = old_checkpoint[path['''old''']][:, :, 0]
else:
_UpperCAmelCase : Dict = old_checkpoint[path['''old''']]
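# Minimal sketch (toy shapes chosen only for illustration) of the qkv split that
# `assign_to_checkpoint` performs above: a fused (3 * channels, channels) weight
# is reshaped per attention head and split into equal query/key/value chunks
# along dim=1 before being flattened back per projection.
def _demo_qkv_split(num_heads: int = 2, channels: int = 8):
    fused = torch.randn(3 * channels, channels)
    # group rows by head: (num_heads, 3 * channels // num_heads, channels)
    per_head = fused.reshape((num_heads, 3 * channels // num_heads) + fused.shape[1:])
    # each chunk holds one projection's rows for every head
    query, key, value = per_head.split(channels // num_heads, dim=1)
    target_shape = (-1, channels)
    return query.reshape(target_shape), key.reshape(target_shape), value.reshape(target_shape)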
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : Any ):
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Tuple = checkpoint['''time_embed.0.weight''']
_UpperCAmelCase : Optional[Any] = checkpoint['''time_embed.0.bias''']
_UpperCAmelCase : int = checkpoint['''time_embed.2.weight''']
_UpperCAmelCase : Tuple = checkpoint['''time_embed.2.bias''']
_UpperCAmelCase : Union[str, Any] = checkpoint['''input_blocks.0.0.weight''']
_UpperCAmelCase : Any = checkpoint['''input_blocks.0.0.bias''']
_UpperCAmelCase : int = checkpoint['''out.0.weight''']
_UpperCAmelCase : str = checkpoint['''out.0.bias''']
_UpperCAmelCase : Dict = checkpoint['''out.2.weight''']
_UpperCAmelCase : Optional[int] = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
_UpperCAmelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
_UpperCAmelCase : Optional[Any] = {
layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the middle blocks only
_UpperCAmelCase : Dict = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
_UpperCAmelCase : Union[str, Any] = {
layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the output blocks only
_UpperCAmelCase : int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
_UpperCAmelCase : Tuple = {
layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
for i in range(1 , UpperCamelCase__ ):
_UpperCAmelCase : Dict = (i - 1) // (config['''num_res_blocks'''] + 1)
_UpperCAmelCase : Tuple = (i - 1) % (config['''num_res_blocks'''] + 1)
_UpperCAmelCase : List[Any] = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key]
_UpperCAmelCase : Any = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key]
if F'input_blocks.{i}.0.op.weight' in checkpoint:
_UpperCAmelCase : int = checkpoint[
F'input_blocks.{i}.0.op.weight'
]
_UpperCAmelCase : Optional[int] = checkpoint[
F'input_blocks.{i}.0.op.bias'
]
continue
_UpperCAmelCase : List[Any] = renew_resnet_paths(UpperCamelCase__ )
_UpperCAmelCase : int = {'''old''': F'input_blocks.{i}.0', '''new''': F'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
_UpperCAmelCase : Optional[Any] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path, resnet_op] , config=UpperCamelCase__ )
if len(UpperCamelCase__ ):
_UpperCAmelCase : Optional[Any] = renew_attention_paths(UpperCamelCase__ )
_UpperCAmelCase : List[str] = {
'''old''': F'input_blocks.{i}.1',
'''new''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
_UpperCAmelCase : Union[str, Any] = {
F'input_blocks.{i}.1.qkv.bias': {
'''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'input_blocks.{i}.1.qkv.weight': {
'''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=UpperCamelCase__ , config=UpperCamelCase__ , )
_UpperCAmelCase : Optional[int] = middle_blocks[0]
_UpperCAmelCase : Optional[Any] = middle_blocks[1]
_UpperCAmelCase : str = middle_blocks[2]
_UpperCAmelCase : Union[str, Any] = renew_resnet_paths(UpperCamelCase__ )
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , config=UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = renew_resnet_paths(UpperCamelCase__ )
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , config=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = renew_attention_paths(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , attention_paths_to_split=UpperCamelCase__ , config=UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
_UpperCAmelCase : Tuple = i // (config['''num_res_blocks'''] + 1)
_UpperCAmelCase : Any = i % (config['''num_res_blocks'''] + 1)
_UpperCAmelCase : List[Any] = [shave_segments(UpperCamelCase__ , 2 ) for name in output_blocks[i]]
_UpperCAmelCase : Optional[Any] = {}
for layer in output_block_layers:
_UpperCAmelCase , _UpperCAmelCase : int = layer.split('''.''' )[0], shave_segments(UpperCamelCase__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCamelCase__ )
else:
_UpperCAmelCase : List[str] = [layer_name]
if len(UpperCamelCase__ ) > 1:
_UpperCAmelCase : int = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key]
_UpperCAmelCase : Dict = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key]
_UpperCAmelCase : Optional[Any] = renew_resnet_paths(UpperCamelCase__ )
_UpperCAmelCase : List[str] = renew_resnet_paths(UpperCamelCase__ )
_UpperCAmelCase : int = {'''old''': F'output_blocks.{i}.0', '''new''': F'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_UpperCAmelCase : int = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
_UpperCAmelCase : Union[str, Any] = checkpoint[
F'output_blocks.{i}.{index}.conv.weight'
]
_UpperCAmelCase : Any = checkpoint[
F'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCamelCase__ ) == 2:
_UpperCAmelCase : Any = []
if len(UpperCamelCase__ ):
_UpperCAmelCase : List[str] = renew_attention_paths(UpperCamelCase__ )
_UpperCAmelCase : List[str] = {
'''old''': F'output_blocks.{i}.1',
'''new''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
_UpperCAmelCase : str = {
F'output_blocks.{i}.1.qkv.bias': {
'''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'output_blocks.{i}.1.qkv.weight': {
'''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=UpperCamelCase__ , )
else:
_UpperCAmelCase : Union[str, Any] = renew_resnet_paths(UpperCamelCase__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_UpperCAmelCase : Optional[Any] = '''.'''.join(['''output_blocks''', str(UpperCamelCase__ ), path['''old''']] )
_UpperCAmelCase : int = '''.'''.join(['''up_blocks''', str(UpperCamelCase__ ), '''resnets''', str(UpperCamelCase__ ), path['''new''']] )
_UpperCAmelCase : List[Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
_lowerCAmelCase :Optional[Any] = parser.parse_args()
_lowerCAmelCase :Any = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowerCAmelCase :Optional[int] = json.loads(f.read())
_lowerCAmelCase :str = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowerCAmelCase :Tuple = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowerCAmelCase :Tuple = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
_lowerCAmelCase :int = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
_lowerCAmelCase :Optional[int] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 506 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type ='''dpt'''
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1E-12 , A=3_8_4 , A=1_6 , A=3 , A=False , A=True , A=[2, 5, 8, 1_1] , A="project" , A=[4, 2, 1, 0.5] , A=[9_6, 1_9_2, 3_8_4, 7_6_8] , A=2_5_6 , A=-1 , A=False , A=True , A=0.4 , A=2_5_5 , A=0.1 , A=[1, 1_0_2_4, 2_4, 2_4] , A=[0, 1] , A=None , **A , ) -> int:
super().__init__(**A )
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : Union[str, Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase : List[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_UpperCAmelCase : int = BitConfig(**A )
elif isinstance(A , A ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase : Union[str, Any] = BitConfig(**A )
elif isinstance(A , A ):
_UpperCAmelCase : Tuple = backbone_config
else:
raise ValueError(
f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
_UpperCAmelCase : int = backbone_featmap_shape
_UpperCAmelCase : Dict = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Any = []
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : int = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : int = patch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Tuple = qkv_bias
_UpperCAmelCase : str = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
_UpperCAmelCase : List[Any] = readout_type
_UpperCAmelCase : int = reassemble_factors
_UpperCAmelCase : int = neck_hidden_sizes
_UpperCAmelCase : Tuple = fusion_hidden_size
_UpperCAmelCase : Any = head_in_index
_UpperCAmelCase : Optional[int] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase : List[str] = use_auxiliary_head
_UpperCAmelCase : int = auxiliary_loss_weight
_UpperCAmelCase : Any = semantic_loss_ignore_index
_UpperCAmelCase : List[Any] = semantic_classifier_dropout
    def __lowerCAmelCase ( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 506 | 1 |
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( x , mu = 0.0 , sigma = 1.0 ) -> float:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
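# Quick sanity check (values follow directly from the formula above): the
# standard normal density peaks at x == mu, where it equals 1 / sqrt(2 * pi) ~= 0.3989.
assert abs(SCREAMING_SNAKE_CASE(0.0) - 1 / sqrt(2 * pi)) < 1e-12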
if __name__ == "__main__":
import doctest
doctest.testmod()
| 142 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> str:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =features.copy() if features else default_expected_features
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase =features.copy() if features else default_expected_features
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ) -> Optional[Any]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase ={'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase =features.copy()
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase =JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Dict:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
if issubclass(snake_case__ , snake_case__ ):
__UpperCAmelCase =jsonl_path
elif issubclass(snake_case__ , snake_case__ ):
__UpperCAmelCase =[jsonl_path]
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Dict:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
__UpperCAmelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase =JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =features.copy() if features else default_expected_features
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =JsonDatasetReader({'''train''': jsonl_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
if split:
__UpperCAmelCase ={split: jsonl_path}
else:
__UpperCAmelCase ='''train'''
__UpperCAmelCase ={'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
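# Standalone sketch (toy columns, illustrative only) of the round trip the tests
# below exercise: serialize a small Dataset to an in-memory JSON-lines buffer,
# then parse it back with the helper just defined.
def _demo_json_roundtrip():
    dataset = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(dataset, buffer, lines=True).write()
        buffer.seek(0)
        return load_json_lines(buffer)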
class _SCREAMING_SNAKE_CASE :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase).write()
buffer.seek(0)
__UpperCAmelCase =load_json_function(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
assert isinstance(exported_content[0] , UpperCAmelCase)
assert len(UpperCAmelCase) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase , orient=UpperCAmelCase).write()
buffer.seek(0)
__UpperCAmelCase =load_json(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 1_0
else:
assert len(UpperCAmelCase) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase , num_proc=2).write()
buffer.seek(0)
__UpperCAmelCase =load_json_function(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
assert isinstance(exported_content[0] , UpperCAmelCase)
assert len(UpperCAmelCase) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase , orient=UpperCAmelCase , num_proc=2).write()
buffer.seek(0)
__UpperCAmelCase =load_json(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 1_0
else:
assert len(UpperCAmelCase) == 1_0
def A__ (self , UpperCAmelCase):
'''simple docstring'''
with pytest.raises(UpperCAmelCase):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =tmp_path_factory.mktemp('''data''') / f"""test.json.{extension}"""
__UpperCAmelCase =str(shared_datadir / f"""test_file.json.{extension}""")
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , compression=UpperCAmelCase).write()
with fsspec.open(UpperCAmelCase , '''rb''' , compression='''infer''') as f:
__UpperCAmelCase =f.read()
with fsspec.open(UpperCAmelCase , '''rb''' , compression='''infer''') as f:
__UpperCAmelCase =f.read()
assert exported_content == original_content
| 142 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint ( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> None:
'''simple docstring'''
with open(_lowercase ) as metadata_file:
A_ = json.load(_lowercase )
A_ = LukeConfig(use_entity_aware_attention=_lowercase , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
A_ = torch.load(_lowercase , map_location='''cpu''' )
# Load the entity vocab file
A_ = load_entity_vocab(_lowercase )
A_ = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken('''<ent>''' , lstrip=_lowercase , rstrip=_lowercase )
A_ = AddedToken('''<ent2>''' , lstrip=_lowercase , rstrip=_lowercase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(_lowercase )
with open(os.path.join(_lowercase , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
A_ = LukeTokenizer.from_pretrained(_lowercase )
# Initialize the embeddings of the special tokens
A_ = state_dict['embeddings.word_embeddings.weight']
A_ = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
A_ = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = f'encoder.layer.{layer_index}.attention.self.'
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict['entity_embeddings.entity_embeddings.weight']
A_ = entity_emb[entity_vocab['[MASK]']]
A_ = LukeModel(config=_lowercase ).eval()
A_ = model.load_state_dict(_lowercase , strict=_lowercase )
if not (len(_lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'Missing keys {", ".join(_lowercase )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
A_ = LukeTokenizer.from_pretrained(_lowercase , task='''entity_classification''' )
A_ = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
A_ = (39, 42)
A_ = tokenizer(_lowercase , entity_spans=[span] , add_prefix_space=_lowercase , return_tensors='''pt''' )
A_ = model(**_lowercase )
# Verify word hidden states
if model_size == "large":
A_ = torch.Size((1, 42, 1024) )
A_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ = torch.Size((1, 42, 768) )
A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A_ = torch.Size((1, 1, 1024) )
A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
A_ = torch.Size((1, 1, 768) )
A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowercase , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_lowercase ) )
model.save_pretrained(_lowercase )
def load_entity_vocab ( entity_vocab_path ) -> dict:
    '''simple docstring'''
    entity_vocab = {}
    with open(entity_vocab_path , '''r''' , encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
    return entity_vocab
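# Illustrative check (hypothetical two-line vocab, not a real LUKE artifact),
# kept as comments so the module does not write files on import: each TSV line is
# "<entity title>\t<count>", and the loader maps title -> line index.
# with open('entity_vocab.tsv', 'w', encoding='utf-8') as f:
#     f.write('[PAD]\t0\n[UNK]\t0\n')
# assert load_entity_vocab('entity_vocab.tsv') == {'[PAD]': 0, '[UNK]': 1}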
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a__ : Optional[int] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 188 |
from __future__ import annotations
def print_distance( distance : list[float] , src : int ) -> None:
    '''simple docstring'''
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )
def check_negative_cycle( graph : list[dict[str, int]] , distance : list[float] , edge_count : int ) -> bool:
    '''simple docstring'''
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford( graph : list[dict[str, int]] , vertex_count : int , edge_count : int , src : int ) -> list[float]:
    '''simple docstring'''
    distance = [float('inf' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception('Negative cycle found' )
    return distance
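# Worked example (toy graph chosen for illustration): edges 0->1 (weight 4),
# 0->2 (weight 1) and 2->1 (weight 2) give shortest distances [0.0, 3.0, 1.0]
# from vertex 0, since the indirect route 0->2->1 beats the direct edge.
_example_graph = [
    {'src': 0, 'dst': 1, 'weight': 4},
    {'src': 0, 'dst': 2, 'weight': 1},
    {'src': 2, 'dst': 1, 'weight': 2},
]
assert bellman_ford(_example_graph, 3, 3, 0) == [0.0, 3.0, 1.0]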
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase: Optional[int] = int(input("""Enter number of vertices: """).strip())
__UpperCamelCase: Union[str, Any] = int(input("""Enter number of edges: """).strip())
__UpperCamelCase: list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase: List[str] = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
__UpperCamelCase: List[Any] = {"""src""": src, """dst""": dest, """weight""": weight}
__UpperCamelCase: Optional[int] = int(input("""\nEnter shortest path source:""").strip())
__UpperCamelCase: Dict = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 266 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 52 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def get_yolos_config ( yolos_name ) -> YolosConfig:
    config = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
return config
def read_in_q_k_v ( state_dict , config , base_model = False ) -> None:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key ( name ) -> str:
    if "backbone" in name:
        name = name.replace('backbone' , 'vit' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token' )
    if "det_token" in name:
        name = name.replace('det_token' , 'embeddings.detection_tokens' )
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "class_embed" in name:
        name = name.replace('class_embed' , 'class_labels_classifier' )
    if "bbox_embed" in name:
        name = name.replace('bbox_embed' , 'bbox_predictor' )
    if "vit.norm" in name:
        name = name.replace('vit.norm' , 'vit.layernorm' )
    return name
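# For instance (hypothetical checkpoint key): rename_key('backbone.cls_token')
# first maps 'backbone' -> 'vit' and then 'cls_token' -> 'embeddings.cls_token',
# yielding 'vit.embeddings.cls_token'.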
def convert_state_dict ( orig_state_dict , model ) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img ( ) -> Image.Image:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint ( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ) -> None:
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('Pushing to the hub...' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='hustvl' )
        model.push_to_hub(model_name , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    """simple docstring"""
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="""accelerate command helpers""" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 68 |
import random
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> dict:
_lowercase : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(SCREAMING_SNAKE_CASE )
return graph
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict:
return {
i: [j for j in range(SCREAMING_SNAKE_CASE ) if i != j] for i in range(SCREAMING_SNAKE_CASE )
}
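# Usage sketch (illustrative, not part of the upstream file): with probability >= 1
# the generator falls through to complete_graph, so all three vertices end up
# mutually connected and the result is deterministic.
assert random_graph(3, 1.0) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}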
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase : Tuple = TypeVar("T")
class lowercase ( Generic[T] ):
def __init__( self , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
def __str__( self ) -> str:
"""simple docstring"""
return F'''{self.data}'''
class lowercase ( Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
UpperCamelCase = None
def __iter__( self ) -> Iterator[T]:
"""simple docstring"""
UpperCamelCase = self.top
while node:
yield node.data
UpperCamelCase = node.next
def __str__( self ) -> str:
"""simple docstring"""
return "->".join([str(_a ) for item in self] )
def __len__( self ) -> int:
"""simple docstring"""
return len(tuple(iter(self ) ) )
def __UpperCamelCase ( self ) -> bool:
"""simple docstring"""
return self.top is None
def __UpperCamelCase ( self , A_ ) -> None:
"""simple docstring"""
UpperCamelCase = Node(_a )
if not self.is_empty():
UpperCamelCase = self.top
UpperCamelCase = node
def __UpperCamelCase ( self ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _a )
UpperCamelCase = self.top
UpperCamelCase = self.top.next
return pop_node.data
def __UpperCamelCase ( self ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def __UpperCamelCase ( self ) -> None:
"""simple docstring"""
UpperCamelCase = None
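# Brief usage sketch of the linked-list stack above (toy values): items come back
# in last-in-first-out order, and __str__ renders them top to bottom.
stack: LinkedStack[int] = LinkedStack()
stack.push(1)
stack.push(2)
assert str(stack) == '2->1'
assert stack.pop() == 2 and stack.peek() == 1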
if __name__ == "__main__":
from doctest import testmod
testmod()
| 709 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset , expected_features ):
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(lowercase , lowercase ):
UpperCamelCase = parquet_path
elif issubclass(lowercase , lowercase ):
UpperCamelCase = [parquet_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def A ( lowercase , lowercase , lowercase=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase , lowercase )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = ParquetDatasetReader({'train': parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCamelCase = {split: parquet_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': parquet_path, 'test': parquet_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def A ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCamelCase = {'image': [image_path]}
UpperCamelCase = Features({'image': Image()} )
UpperCamelCase = Dataset.from_dict(lowercase , features=lowercase )
UpperCamelCase = ParquetDatasetWriter(lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCamelCase = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
assert get_writer_batch_size(lowercase ) == expected
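# Standalone sketch (toy data, illustrative only) of the writer/reader pair the
# tests above exercise: write one small Dataset to Parquet and read it back.
def _demo_parquet_roundtrip(tmp_dir):
    dataset = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2]})
    ParquetDatasetWriter(dataset , tmp_dir / 'demo.parquet' ).write()
    return ParquetDatasetReader(str(tmp_dir / 'demo.parquet' ) ).read()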
| 3 | 0 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 304 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : List[str] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Union[str, Any] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
lowercase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304 | 1 |
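# The init file above defers heavy imports via `_LazyModule`. A stripped-down
# sketch of that idea (illustrative only, not the transformers implementation):
import importlib

class LazyModule:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr):
        # The real module is imported only on first attribute access.
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)

math_lazy = LazyModule("math")
print(math_lazy.sqrt(9.0))  # the import of `math` actually happens here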
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( t ) ->Dict:
    t = int(t )
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def html_progress_bar( value , total , prefix , label , width=300 ) ->Any:
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table( items ) ->Dict:
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = F'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar :
"""simple docstring"""
    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix = None , leave = True , parent = None , width = 300 , ) -> List[str]:
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value , force_update = False , comment = None ) -> List[str]:
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ) -> Optional[int]:
        spaced_value = ''' ''' * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
        else:
            self.label = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
                F''' {format_time(self.predicted_remaining )}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
        self.display()
    def display( self ) -> Dict:
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ) -> Union[str, Any]:
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML('''''' ) )
class NotebookTrainingTracker ( NotebookProgressBar ):
"""simple docstring"""
    def __init__( self , num_steps , column_names=None ) -> str:
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ) -> List[Any]:
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ) -> str:
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix=None , width=300 ) -> Optional[int]:
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self ) -> Optional[int]:
        self.child_bar = None
        self.display()
class NotebookProgressCallback ( TrainerCallback ):
"""simple docstring"""
    def __init__( self ) -> Optional[int]:
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ) -> List[str]:
        self.first_column = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ) -> List[str]:
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ) -> List[str]:
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ) -> str:
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ) -> List[str]:
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['''Step'''] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ) -> Any:
        if self.training_tracker is not None:
            values = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values['''Training Loss'''] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                values['''Epoch'''] = int(state.epoch )
            else:
                values['''Step'''] = state.global_step
            metric_key_prefix = '''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    metric_key_prefix = re.sub(R'''\_loss$''' , '''''' , k )
            _ = metrics.pop('''total_flos''' , None )
            _ = metrics.pop('''epoch''' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_runtime''' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , None )
            for k, v in metrics.items():
                if k == F'''{metric_key_prefix}_loss''':
                    values['''Validation Loss'''] = v
                else:
                    splits = k.split('''_''' )
                    name = ''' '''.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ) -> Dict:
        self.training_tracker.update(
            state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=True )
        self.training_tracker = None
| 558 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ) -> Tuple:
        super().setUp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ) -> Dict:
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def a__ ( self , _lowercase ) -> Any:
_lowerCamelCase : Any = '''lower newer'''
_lowerCamelCase : Any = '''lower newer'''
return input_text, output_text
def a__ ( self ) -> Tuple:
_lowerCamelCase : Dict = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : Union[str, Any] = '''lower newer'''
_lowerCamelCase : Optional[int] = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
_lowerCamelCase : str = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
_lowerCamelCase : Any = tokens + [tokenizer.unk_token]
_lowerCamelCase : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
@require_ftfy
def a__ ( self ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_lowerCamelCase : Any = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
_lowerCamelCase : Tuple = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_lowerCamelCase : Any = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
_lowerCamelCase : Tuple = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Test that the tokenization is identical on unicode of space type
_lowerCamelCase : Optional[int] = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_lowerCamelCase : Optional[int] = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : Any = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Test that the tokenization is identical on unicode of line break type
_lowerCamelCase : Union[str, Any] = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_lowerCamelCase : Dict = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : str = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def a__ ( self ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : int = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCamelCase : List[Any] = F'''{text_of_1_token} {text_of_1_token}'''
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , )
_lowerCamelCase : List[str] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCamelCase : str = F''' {text}'''
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , )
_lowerCamelCase : List[Any] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ) + 1, 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
def a__ ( self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_lowercase ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def a__ ( self ) -> Tuple:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Tuple:
# CLIP always lower cases letters
pass
| 558 | 1 |
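# The merges list in the fixture above drives byte-pair encoding: adjacent
# symbol pairs are fused greedily in merge-rank order. A toy sketch of that
# loop (illustrative only, not the tokenizer's actual implementation):
def bpe(word, merges):
    ranks = {pair: i for i, pair in enumerate(merges)}
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no more applicable merges
        i = pairs.index(best)
        symbols[i : i + 2] = ["".join(best)]
    return symbols

# With the fixture's merges, "lower" splits into the tokens the test expects.
print(bpe(("l", "o", "w", "e", "r</w>"), [("l", "o"), ("lo", "w</w>"), ("e", "r</w>")]))
# ['lo', 'w', 'er</w>']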
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class UpperCAmelCase__ ( A_ ):
'''simple docstring'''
def __init__( self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : int ):
"""simple docstring"""
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCamelCase , )
        super().__init__(*UpperCamelCase , **UpperCamelCase )
| 322 |
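# The class above is a pure deprecation shim: warn once, then defer to the
# new implementation. The same pattern in miniature (names are illustrative):
import warnings

class NewAPI:
    def __init__(self, size=224):
        self.size = size

class OldAPI(NewAPI):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldAPI is deprecated, use NewAPI instead." , FutureWarning)
        super().__init__(*args, **kwargs)

OldAPI()  # emits a FutureWarning, then behaves exactly like NewAPI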
import os
UpperCamelCase__ = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
def parse_roman_numerals( numerals ) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num ) -> str:
    '''simple docstring'''
    numerals = ''''''
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase__ ( UpperCAmelCase_ = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_lowercase : List[str] = 0
with open(os.path.dirname(UpperCAmelCase_ ) + roman_numerals_filename ) as filea:
_lowercase : Optional[Any] = filea.readlines()
for line in lines:
_lowercase : int = line.strip()
_lowercase : Dict = parse_roman_numerals(UpperCAmelCase_ )
_lowercase : Optional[Any] = generate_roman_numerals(UpperCAmelCase_ )
savings += len(UpperCAmelCase_ ) - len(UpperCAmelCase_ )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""") | 322 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig ( PretrainedConfig ):
    model_type = "mgp-str"
    def __init__( self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50257 , num_wordpiece_labels=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ) -> List[Any]:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 704 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ) -> str:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Optional[Any]:
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ) -> Dict:
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[str]= self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__: List[str]= feat_extract_first.save_pretrained(lowerCAmelCase )[0]
check_json_file_has_correct_format(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= self.feature_extraction_class.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE__: int= feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE__: Any= feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: List[Any]= self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__: List[str]= os.path.join(lowerCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.feature_extraction_class.from_json_file(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE__: List[str]= feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__: str= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__: Tuple= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor(lowerCAmelCase , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE__: List[str]= feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE__: Union[str, Any]= feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__: Optional[int]= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE__: List[Any]= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__: Any= np.asarray(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE__: List[Any]= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test truncation required
SCREAMING_SNAKE_CASE__: str= [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
SCREAMING_SNAKE_CASE__: str= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
SCREAMING_SNAKE_CASE__: str= [x[: feature_extractor.n_samples] for x in speech_inputs]
SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs_truncated]
SCREAMING_SNAKE_CASE__: Dict= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE__: List[str]= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
def UpperCamelCase_ ( self ) -> Dict:
import torch
SCREAMING_SNAKE_CASE__: List[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__: Dict= np.random.rand(100 , 32 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__: Union[str, Any]= np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__: Tuple= feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
SCREAMING_SNAKE_CASE__: Any= feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ) -> str:
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def UpperCamelCase_ ( self ) -> int:
# fmt: off
SCREAMING_SNAKE_CASE__: Dict= torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
SCREAMING_SNAKE_CASE__: str= self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__: Dict= WhisperFeatureExtractor()
SCREAMING_SNAKE_CASE__: Optional[int]= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: List[str]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__: Dict= self._load_datasamples(1 )[0]
SCREAMING_SNAKE_CASE__: str= ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCAmelCase )[0]
self.assertTrue(np.all(np.mean(lowerCAmelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase ) - 1 ) < 1e-3 ) )
| 107 | 0 |
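# Quick usage sketch for the feature extractor tested above (assumes
# transformers and numpy are installed; the audio is synthetic silence).
import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor()
audio = np.zeros(16000 , dtype=np.float32 )  # one second at 16 kHz
feats = fe(audio , sampling_rate=16000 , return_tensors='np' ).input_features
print(feats.shape )  # (1, 80, 3000): 80 mel bins, padded to 30 s of frames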
import math
def decimal_to_octal( num ) -> str:
    """simple docstring"""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 ) # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F"0o{int(octal )}"
def main( ) -> None:
"""simple docstring"""
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(216 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(512 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 27 |
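# Cross-check for the converter above against Python's builtin octal
# formatting (assumes the helper is bound to the name `decimal_to_octal`,
# as main() expects).
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n ) == oct(n ) == f"0o{n:o}"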
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A : List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check( pkg , hint=None ) -> Union[str, Any]:
    """simple docstring"""
    require_version(deps[pkg] , hint )
| 27 | 1 |
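# A hand-rolled illustration of the version gating done above; the real
# require_version helper also parses full specifier strings (>=, <, ==).
import importlib.metadata

def check_min_version(pkg , minimum ):
    got = importlib.metadata.version(pkg )
    if tuple(map(int , got.split('.' )[:2] )) < tuple(map(int , minimum.split('.' )[:2] )):
        raise ImportError(f"{pkg}>={minimum} is required, found {got}" )

check_min_version('packaging' , '20.0' )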
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def lowerCAmelCase ( UpperCAmelCase ) ->str:
    """simple docstring"""
    UpperCAmelCase = re.sub('''<n>''', '''''', UpperCAmelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(UpperCAmelCase ) )
| 708 |
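# Illustrative call of the sentence splitter above (requires the punkt model
# downloaded by the try/except block at the top of the file).
text = "Transformers are neural networks. They rely on attention."
print("\n".join(nltk.sent_tokenize(text ) ))
# Transformers are neural networks.
# They rely on attention.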
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator, batch_size = 16 ) ->Any:
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''', '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == '''fp8'''), )
    return train_dataloader, eval_dataloader
def training_function( config, args ) ->Optional[Any]:
    """simple docstring"""
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''', '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric )
def main( ) ->Optional[Any]:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
| 336 | 0 |
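# The core gradient-accumulation trick from the script above, distilled into
# plain PyTorch (shapes and step counts are illustrative).
import torch

model = torch.nn.Linear(4, 1 )
opt = torch.optim.SGD(model.parameters(), lr=0.1 )
accum_steps = 4
for step in range(8 ):
    x, y = torch.randn(2, 4 ), torch.randn(2, 1 )
    loss = torch.nn.functional.mse_loss(model(x ), y ) / accum_steps
    loss.backward()  # gradients add up across calls until zero_grad()
    if (step + 1) % accum_steps == 0:
        opt.step()
        opt.zero_grad()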
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def _get_uniform_logits( self , batch_size , length ) -> str:
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : List[str] = None
a_ : Optional[Any] = 2_0
a_ : Dict = self._get_uniform_logits(batch_size=2 , length=SCREAMING_SNAKE_CASE__ )
# tweak scores to not be uniform anymore
a_ : str = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
a_ : Any = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
a_ : Any = jax.nn.softmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
a_ : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
a_ : int = FlaxTemperatureLogitsWarper(temperature=1.3 )
a_ : Any = jax.nn.softmax(temp_dist_warper_sharper(SCREAMING_SNAKE_CASE__ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE__ ) , axis=-1 )
a_ : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(SCREAMING_SNAKE_CASE__ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : List[str] = None
a_ : Optional[int] = 1_0
a_ : Optional[int] = 2
# create ramp distribution
a_ : Dict = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE__ )[None, :] , (batch_size, vocab_size) ).copy()
a_ : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
a_ : List[Any] = FlaxTopKLogitsWarper(3 )
a_ : Optional[Any] = top_k_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
a_ : Optional[Any] = 5
a_ : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
a_ : str = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE__ )[None, :] , (batch_size, length) ).copy()
a_ : List[Any] = top_k_warp_safety_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
a_ : List[str] = None
a_ : Optional[Any] = 1_0
a_ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
a_ : Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
a_ : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
a_ : List[str] = np.exp(top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
a_ : List[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
a_ : List[str] = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
a_ : str = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
a_ : Optional[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
a_ : int = top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
a_ : Tuple = 2_0
a_ : Union[str, Any] = 4
a_ : Tuple = 0
a_ : Tuple = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE__ )
# check that min length is applied at length 5
a_ : Union[str, Any] = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
a_ : str = 5
a_ : int = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = min_dist_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
a_ : Tuple = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[str] = 1_5
a_ : Dict = min_dist_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE__ ).any() )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = 2_0
a_ : Dict = 4
a_ : List[Any] = 0
a_ : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE__ )
# check that all scores are -inf except the bos_token_id score
a_ : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=2_0 )
a_ : str = 1
a_ : Optional[int] = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[str] = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
a_ : str = 3
a_ : Any = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : str = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE__ ).any() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
a_ : Optional[Any] = 2_0
a_ : Tuple = 4
a_ : Any = 0
a_ : Any = 5
a_ : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
a_ : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=2_0 )
a_ : Union[str, Any] = 4
a_ : str = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
a_ : Union[str, Any] = 3
a_ : List[str] = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : str = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE__ ).any() )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
a_ : Any = 4
a_ : int = 1_0
a_ : Any = 1_5
a_ : int = 2
a_ : List[str] = 1
a_ : Union[str, Any] = 1_5
# dummy input_ids and scores
a_ : Optional[int] = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = input_ids.copy()
a_ : str = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = scores.copy()
# instantiate all dist processors
a_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
a_ : str = FlaxTopKLogitsWarper(3 )
a_ : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
a_ : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE__ )
a_ : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = 1_0
# no processor list
a_ : List[Any] = temp_dist_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = top_k_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = min_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = bos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = eos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# with processor list
a_ : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
a_ : Any = processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : Dict = 4
a_ : Any = 1_0
a_ : Optional[Any] = 1_5
a_ : List[Any] = 2
a_ : Optional[int] = 1
a_ : List[Any] = 1_5
# dummy input_ids and scores
a_ : List[Any] = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE__ )
a_ : Dict = input_ids.copy()
a_ : Union[str, Any] = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Any = scores.copy()
# instantiate all dist processors
a_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
a_ : Optional[Any] = FlaxTopKLogitsWarper(3 )
a_ : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
a_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = 1_0
# no processor list
def run_no_processor_list(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
a_ : Optional[int] = temp_dist_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : int = top_k_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = min_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = bos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
a_ : int = eos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
return scores
# with processor list
def run_processor_list(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ):
a_ : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
a_ : List[Any] = processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
return scores
a_ : Any = jax.jit(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = jax.jit(SCREAMING_SNAKE_CASE__ )
a_ : int = jitted_run_no_processor_list(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : int = jitted_run_processor_list(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 570 |
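# Chaining warpers outside the test harness above (assumes jax and
# transformers are installed; the logits values are arbitrary).
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processor = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7 ), FlaxTopKLogitsWarper(3 )] )
input_ids = jnp.zeros((1, 3), dtype=jnp.int32 )
scores = jnp.array([[0.1, 0.4, 0.2, 0.3]] )
print(processor(input_ids, scores, cur_len=3 ) )  # all but the top 3 become -inf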
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = '▁'
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Tuple = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
UpperCAmelCase_ : Tuple = {
'google/reformer-crime-and-punishment': 52_4288,
}
class ReformerTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : List[str]=[] , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> None:
a_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
a_ : Dict = vocab_file
a_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, int]:
a_ : Union[str, Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Optional[Any]:
a_ : List[str] = self.__dict__.copy()
a_ : Any = None
return state
def __setstate__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
a_ : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a_ : List[Any] = {}
a_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
if index < self.sp_model.get_piece_size():
a_ : List[str] = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
a_ : Dict = []
a_ : List[Any] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
a_ : int = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a_ : str = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
a_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
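# Hedged usage sketch for the tokenizer above; loading the checkpoint needs
# network access and the `sentencepiece` package.
if __name__ == "__main__":
    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok.encode("Crime and Punishment")
    print(tok.convert_ids_to_tokens(ids))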
| 570 | 1 |
def permute_recursive(nums):
    """
    Return all permutations of nums by rotating out the head element.

    >>> permute_recursive([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute_recursive(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums):
    """
    Return all permutations of nums using in-place swaps and backtracking.

    >>> permute_backtrack([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by the backtracking variant
    res = permute_backtrack([1, 2, 3])
    print(res)
doctest.testmod()
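    # Quick cross-check (illustrative): both implementations generate the same
    # 3! = 6 orderings of [1, 2, 3], up to the order of the output list.
    assert sorted(permute_recursive([1, 2, 3])) == sorted(permute_backtrack([1, 2, 3]))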
| 196 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 196 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
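# For reference, a minimal standalone sketch of the idea behind _LazyModule:
# defer a submodule import until an attribute is first accessed, via the
# module-level __getattr__ hook from PEP 562. The mapping below is an
# illustrative assumption, not part of the transformers API.
import importlib

_LAZY_ATTRS = {"XLMRobertaXLConfig": ".configuration_xlm_roberta_xl"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")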
| 179 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
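# Sanity check (illustrative, not part of the original file): for uniform
# logits over k classes the softmax is uniform and the entropy equals log(k).
if __name__ == "__main__":
    k = 4
    uniform = torch.zeros(1, k)
    assert torch.allclose(entropy(uniform), torch.log(torch.tensor([float(k)])))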
class DeeBertEncoder(nn.Module):
"""simple docstring"""
def __init__( self :List[str] , __lowercase :int ):
super().__init__()
__lowerCamelCase : str =config.output_attentions
__lowerCamelCase : List[Any] =config.output_hidden_states
__lowerCamelCase : Dict =nn.ModuleList([BertLayer(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : str =nn.ModuleList([BertHighway(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : Optional[Any] =[-1 for _ in range(config.num_hidden_layers )]
def __lowercase ( self :Union[str, Any] , __lowercase :Union[str, Any] ):
if (type(__lowercase ) is float) or (type(__lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__lowerCamelCase : Tuple =x
else:
__lowerCamelCase : Any =x
def __lowercase ( self :Union[str, Any] , __lowercase :Tuple ):
__lowerCamelCase : Union[str, Any] =pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __lowercase ( self :Tuple , __lowercase :Optional[int] , __lowercase :Dict=None , __lowercase :Union[str, Any]=None , __lowercase :List[str]=None , __lowercase :str=None , ):
__lowerCamelCase : Any =()
__lowerCamelCase : List[str] =()
__lowerCamelCase : Optional[int] =()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__lowerCamelCase : int =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =layer_module(
__lowercase , __lowercase , head_mask[i] , __lowercase , __lowercase )
__lowerCamelCase : Optional[int] =layer_outputs[0]
if self.output_attentions:
__lowerCamelCase : Optional[Any] =all_attentions + (layer_outputs[1],)
__lowerCamelCase : Any =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =current_outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Dict =current_outputs + (all_attentions,)
__lowerCamelCase : str =self.highway[i](__lowercase )
# logits, pooled_output
if not self.training:
__lowerCamelCase : Tuple =highway_exit[0]
__lowerCamelCase : Tuple =entropy(__lowercase )
__lowerCamelCase : Tuple =highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__lowerCamelCase : Optional[int] =all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__lowerCamelCase : Dict =(highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowercase , i + 1 )
else:
__lowerCamelCase : Union[str, Any] =all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Tuple =outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Optional[int] =outputs + (all_attentions,)
__lowerCamelCase : int =outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , snake_case__ , )
class DeeBertModel(BertPreTrainedModel):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :str ):
super().__init__(__lowercase )
__lowerCamelCase : Union[str, Any] =config
__lowerCamelCase : List[str] =BertEmbeddings(__lowercase )
__lowerCamelCase : Dict =DeeBertEncoder(__lowercase )
__lowerCamelCase : List[Any] =BertPooler(__lowercase )
self.init_weights()
def __lowercase ( self :Tuple ):
self.encoder.init_highway_pooler(self.pooler )
def __lowercase ( self :Dict ):
return self.embeddings.word_embeddings
def __lowercase ( self :List[str] , __lowercase :int ):
__lowerCamelCase : Union[str, Any] =value
def __lowercase ( self :List[Any] , __lowercase :Dict ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__lowercase )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __lowercase ( self :Optional[Any] , __lowercase :List[str]=None , __lowercase :List[Any]=None , __lowercase :Any=None , __lowercase :Tuple=None , __lowercase :Union[str, Any]=None , __lowercase :Optional[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Tuple=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowerCamelCase : List[str] =input_ids.size()
elif inputs_embeds is not None:
__lowerCamelCase : str =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowerCamelCase : Optional[int] =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCamelCase : str =torch.ones(__lowercase , device=__lowercase )
if encoder_attention_mask is None:
__lowerCamelCase : Tuple =torch.ones(__lowercase , device=__lowercase )
if token_type_ids is None:
__lowerCamelCase : List[Any] =torch.zeros(__lowercase , dtype=torch.long , device=__lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCamelCase : torch.Tensor =self.get_extended_attention_mask(__lowercase , __lowercase , __lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowerCamelCase : List[str] =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowerCamelCase : Any =encoder_attention_mask[:, None, None, :]
__lowerCamelCase : Optional[Any] =encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__lowerCamelCase : List[str] =(1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCamelCase : Union[str, Any] =self.get_head_mask(__lowercase , self.config.num_hidden_layers )
__lowerCamelCase : str =self.embeddings(
input_ids=__lowercase , position_ids=__lowercase , token_type_ids=__lowercase , inputs_embeds=__lowercase )
__lowerCamelCase : Dict =self.encoder(
__lowercase , attention_mask=__lowercase , head_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__lowerCamelCase : int =encoder_outputs[0]
__lowerCamelCase : Tuple =self.pooler(__lowercase )
__lowerCamelCase : int =(
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
"""simple docstring"""
def __init__( self :List[Any] , __lowercase :Optional[Any] , __lowercase :Dict ):
__lowerCamelCase : List[Any] =message
__lowerCamelCase : int =exit_layer # start from 1!
class BertHighway(nn.Module):
"""simple docstring"""
def __init__( self :Any , __lowercase :str ):
super().__init__()
__lowerCamelCase : str =BertPooler(__lowercase )
__lowerCamelCase : Union[str, Any] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : List[str] =nn.Linear(config.hidden_size , config.num_labels )
def __lowercase ( self :Union[str, Any] , __lowercase :List[str] ):
# Pooler
__lowerCamelCase : Optional[Any] =encoder_outputs[0]
__lowerCamelCase : Any =self.pooler(__lowercase )
# "return" pooler_output
# BertModel
__lowerCamelCase : List[str] =(pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__lowerCamelCase : List[Any] =bmodel_output[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : int =self.classifier(__lowercase )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , snake_case__ , )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :Dict ):
super().__init__(__lowercase )
__lowerCamelCase : Any =config.num_labels
__lowerCamelCase : int =config.num_hidden_layers
__lowerCamelCase : Tuple =DeeBertModel(__lowercase )
__lowerCamelCase : Optional[int] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : Optional[int] =nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __lowercase ( self :List[str] , __lowercase :List[str]=None , __lowercase :str=None , __lowercase :Optional[Any]=None , __lowercase :List[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Dict=None , __lowercase :int=None , __lowercase :int=-1 , __lowercase :List[str]=False , ):
__lowerCamelCase : Union[str, Any] =self.num_layers
try:
__lowerCamelCase : Union[str, Any] =self.bert(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowerCamelCase : List[Any] =outputs[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : Tuple =self.classifier(__lowercase )
__lowerCamelCase : int =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCamelCase : Union[str, Any] =e.message
__lowerCamelCase : Optional[Any] =e.exit_layer
__lowerCamelCase : Any =outputs[0]
if not self.training:
__lowerCamelCase : List[Any] =entropy(__lowercase )
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Union[str, Any] =MSELoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : Dict =CrossEntropyLoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCamelCase : str =[]
for highway_exit in outputs[-1]:
__lowerCamelCase : List[str] =highway_exit[0]
if not self.training:
highway_logits_all.append(__lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Optional[int] =MSELoss()
__lowerCamelCase : Optional[Any] =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : int =CrossEntropyLoss()
__lowerCamelCase : int =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowercase )
if train_highway:
__lowerCamelCase : Dict =(sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCamelCase : List[str] =(loss,) + outputs
if not self.training:
__lowerCamelCase : List[Any] =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCamelCase : Dict =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
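# Illustrative sketch (toy numbers, not from the model above) of the early-exit
# rule DeeBERT applies at inference: leave at the first highway layer whose
# prediction entropy falls below that layer's configured threshold.
if __name__ == "__main__":
    early_exit_entropy = [0.1, 0.2, 0.4]   # per-layer thresholds
    highway_entropies = [0.35, 0.25, 0.05]  # entropy of each highway's logits
    exit_layer = next(
        (i + 1 for i, (h, t) in enumerate(zip(highway_entropies, early_exit_entropy)) if h < t),
        len(highway_entropies),
    )
    print(f"exit at layer {exit_layer}")  # exit at layer 3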
| 179 | 1 |
def count_divisors(n):
    """Count the divisors of n via its prime factorization: if
    n = p1**a1 * ... * pk**ak, the divisor count is (a1 + 1) * ... * (ak + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number having more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
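    # Worked example: 28 = 2**2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors
    # (1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6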
| 236 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 236 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 52 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of nums.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
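    # Worked example: from [3, 2, 7, 10] the best non-adjacent picks are
    # 3 and 10, giving 3 + 10 = 13.
    assert maximum_non_adjacent_sum([3, 2, 7, 10]) == 13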
| 649 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
def lowercase_ ( self ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , __UpperCamelCase )
self.assertIsInstance(encoding.boxes , __UpperCamelCase )
# Test batched
A_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowercase_ ( self ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowercase_ ( self ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowercase_ ( self ):
# with apply_OCR = True
A_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
A_ = Image.open(ds[0]["file"] ).convert("RGB" )
A_ = image_processing(__UpperCamelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
A_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCamelCase )
self.assertListEqual(encoding.boxes , __UpperCamelCase )
# with apply_OCR = False
A_ = LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase )
A_ = image_processing(__UpperCamelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 608 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sum all node values of a binary tree with a depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
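    # Small usage example: a root of 5 with children 10 and 6 sums to 21.
    root = Node(5)
    root.left = Node(10)
    root.right = Node(6)
    assert next(iter(BinaryTreeNodeSum(root))) == 21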
| 608 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`; it predicts the depth of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
return output_dict | 360 |
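# Hedged usage sketch for the DepthEstimationPipeline above; "Intel/dpt-large"
# is a real depth-estimation checkpoint, and running this downloads weights.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result["depth"].size, result["predicted_depth"].shape)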
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
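# Worked example (illustrative): 4150 is one of the numbers being summed, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150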
if __name__ == "__main__":
print(solution()) | 360 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    '''
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    '''
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""")
    disable = False
    if main_process_only:
        # show the bar only on the local main process; disable it everywhere else
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 718 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["""torch""", """torchsde"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch""", """torchsde"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """torchsde"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """torchsde"""])
| 86 | 0 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
"""simple docstring"""
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url, stream=True).raw).convert("""RGB""")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
"""simple docstring"""
_UpperCamelCase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase )
_UpperCamelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_UpperCamelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
_UpperCamelCase = 1_0_2_4
_UpperCamelCase = 4_0_9_6
_UpperCamelCase = 2_4
_UpperCamelCase = 1_6
_UpperCamelCase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCamelCase = False
_UpperCamelCase = """relu"""
_UpperCamelCase = 1_0_2_4
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
# load HuggingFace model
_UpperCamelCase = ViTModel(lowerCAmelCase , add_pooling_layer=lowerCAmelCase )
_UpperCamelCase = TrOCRForCausalLM(lowerCAmelCase )
_UpperCamelCase = VisionEncoderDecoderModel(encoder=lowerCAmelCase , decoder=lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
_UpperCamelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" , check_hash=lowerCAmelCase )["""model"""]
_UpperCamelCase = create_rename_keys(lowerCAmelCase , lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_UpperCamelCase = state_dict.pop(lowerCAmelCase )
if key.startswith("""decoder""" ) and "output_projection" not in key:
_UpperCamelCase = val
else:
_UpperCamelCase = val
# load state dict
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image
_UpperCamelCase = ViTImageProcessor(size=encoder_config.image_size )
_UpperCamelCase = RobertaTokenizer.from_pretrained("""roberta-large""" )
_UpperCamelCase = TrOCRProcessor(lowerCAmelCase , lowerCAmelCase )
_UpperCamelCase = processor(images=prepare_img(lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
# verify logits
_UpperCamelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_UpperCamelCase = model(pixel_values=lowerCAmelCase , decoder_input_ids=lowerCAmelCase )
_UpperCamelCase = outputs.logits
_UpperCamelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
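    # Example invocation (arguments as defined by the argparse setup above; the
    # script filename and output folder are illustrative assumptions):
    #
    #   python convert_trocr_unilm_to_pytorch.py \
    #       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
    #       --pytorch_dump_folder_path ./trocr-base-handwritten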
| 612 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : Dict = ["pixel_values"]
def __init__( self , a = True , a = None , a = None , a = PILImageResampling.BILINEAR , a = True , a = 1 / 2_55 , a = True , a = None , a = None , **a , ) -> None:
'''simple docstring'''
super().__init__(**a )
_UpperCamelCase = size if size is not None else {"""shortest_edge""": 3_84}
_UpperCamelCase = get_size_dict(a , default_to_square=a )
_UpperCamelCase = do_resize
_UpperCamelCase = size
# Default value set here for backwards compatibility where the value in config is None
_UpperCamelCase = crop_pct if crop_pct is not None else 2_24 / 2_56
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self , image , size , crop_pct , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
    def rescale(self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess(self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 612 | 1 |
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
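# Expected input format: one weighted, undirected edge per line, e.g.
#   a b 20
#   a c 18
# yields {"a": [["b", "20"], ["c", "18"]], "b": [["a", "20"]], "c": [["a", "18"]]}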
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
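# Each neighbour is the current tour with two interior nodes swapped; its total distance
# is appended as the last element, so after sorting, neighborhood[0] is the cheapest candidate.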
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(F'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 144 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = 'gpt_neox'
    def __init__(self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
    def _rope_scaling_validation(self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
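    # Example of a value that passes validation (illustrative):
    #   rope_scaling={"type": "linear", "factor": 2.0}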
| 144 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
a : Dict = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 218 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 516 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name = name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name = name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
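# For example, "transformer.layers.0.linear1.weight" becomes
# "model.decoder.layers.0.fc1.weight" after the replacements above.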
def rename_state_dict(state_dict , hidden_size )-> Tuple[Dict, Dict]:
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint )-> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("t5-base" )
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
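    # Example invocation (script name and output folder are illustrative):
    #   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small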
| 556 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass
def gen(shards: List[str] ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"] )
    world_size = int(os.environ["WORLD_SIZE"] )
    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool )
    parser.add_argument("--local_rank" , type=int )
    parser.add_argument("--num_workers" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [F"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
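# The script reads RANK and WORLD_SIZE from the environment, so it is meant to be started by
# a distributed launcher, e.g. (illustrative): torchrun --nproc_per_node=2 <script>.py --streaming True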
| 556 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
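    # Resulting layouts: single sequence -> [CLS] A [SEP]; sequence pair -> [CLS] A [SEP] B [SEP]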
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 76 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas, dtype=torch.float32 )
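# Each beta follows from the ratio of consecutive cumulative alphas:
#   beta_i = 1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), clipped at max_beta.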
class DDIMInverseScheduler(SchedulerMixin , ConfigMixin):
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.0001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , clip_sample = True , set_alpha_to_zero = True , steps_offset = 0 , prediction_type = "epsilon" , clip_sample_range = 1.0 , **kwargs , ):
        if kwargs.get("set_alpha_to_one" , None ) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one" , "1.0.0" , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input(self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps(self , num_inference_steps , device = None ):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                F''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
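    # Note: the timesteps produced here run from 0 upwards, and `step` advances to
    # timestep + step_ratio, i.e. this scheduler walks the inverted (noising) direction of DDIM.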
    def step(self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = False , variance_noise = None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                " `v_prediction`" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
    def __len__(self ):
        return self.config.num_train_timesteps
| 131 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0' , 'norm1' )
        new_item = new_item.replace('in_layers.2' , 'conv1' )
        new_item = new_item.replace('out_layers.0' , 'norm2' )
        new_item = new_item.replace('out_layers.3' , 'conv2' )
        new_item = new_item.replace('emb_layers.1' , 'time_emb_proj' )
        new_item = new_item.replace('skip_connection' , 'conv_shortcut' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight' , 'group_norm.weight' )
        new_item = new_item.replace('norm.bias' , 'group_norm.bias' )
        new_item = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
        new_item = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['query']] = query.reshape(target_shape )
            checkpoint[path_map['key']] = key.reshape(target_shape )
            checkpoint[path_map['value']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
        new_path = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
        new_path = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'] , replacement['new'] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint , config ):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
lowercase__ = (i - 1) // (config['num_res_blocks'] + 1)
lowercase__ = (i - 1) % (config['num_res_blocks'] + 1)
lowercase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowercase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowercase__ = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowercase__ = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowercase__ = renew_resnet_paths(A__ )
lowercase__ = {'old': F'''input_blocks.{i}.0''', 'new': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowercase__ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path, resnet_op] , config=A__ )
if len(A__ ):
lowercase__ = renew_attention_paths(A__ )
lowercase__ = {
'old': F'''input_blocks.{i}.1''',
'new': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase__ = {
F'''input_blocks.{i}.1.qkv.bias''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path] , attention_paths_to_split=A__ , config=A__ , )
lowercase__ = middle_blocks[0]
lowercase__ = middle_blocks[1]
lowercase__ = middle_blocks[2]
lowercase__ = renew_resnet_paths(A__ )
assign_to_checkpoint(A__ , A__ , A__ , config=A__ )
lowercase__ = renew_resnet_paths(A__ )
assign_to_checkpoint(A__ , A__ , A__ , config=A__ )
lowercase__ = renew_attention_paths(A__ )
lowercase__ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
A__ , A__ , A__ , attention_paths_to_split=A__ , config=A__ )
for i in range(A__ ):
lowercase__ = i // (config['num_res_blocks'] + 1)
lowercase__ = i % (config['num_res_blocks'] + 1)
lowercase__ = [shave_segments(A__ , 2 ) for name in output_blocks[i]]
lowercase__ = {}
for layer in output_block_layers:
lowercase__, lowercase__ = layer.split('.' )[0], shave_segments(A__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A__ )
else:
lowercase__ = [layer_name]
if len(A__ ) > 1:
lowercase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowercase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowercase__ = renew_resnet_paths(A__ )
lowercase__ = renew_resnet_paths(A__ )
lowercase__ = {'old': F'''output_blocks.{i}.0''', 'new': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase__ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
lowercase__ = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowercase__ = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(A__ ) == 2:
lowercase__ = []
if len(A__ ):
lowercase__ = renew_attention_paths(A__ )
lowercase__ = {
'old': F'''output_blocks.{i}.1''',
'new': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase__ = {
F'''output_blocks.{i}.1.qkv.bias''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=A__ , )
else:
lowercase__ = renew_resnet_paths(A__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase__ = '.'.join(['output_blocks', str(A__ ), path['old']] )
lowercase__ = '.'.join(['up_blocks', str(A__ ), 'resnets', str(A__ ), path['new']] )
lowercase__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
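    # Example invocation (script name and paths are illustrative):
    #   python convert_ldm_original_checkpoint_to_diffusers.py \
    #       --checkpoint_path ./model.ckpt --config_file ./config.json --dump_path ./converted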
| 642 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        'HTSAT-tiny' , 'roberta' , checkpoint_path , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=enable_fusion , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = R'.*sequential.(\d+).*'
    text_projection_pattern = R'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
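# The fused audio `qkv` tensor is split into three equal chunks along dim 0, which assumes
# the query, key and value weights were concatenated in that order in the original checkpoint.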
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( __A : Optional[Any] , __A : Optional[Any] ) -> str:
'''simple docstring'''
inspect_dataset(__A , __A )
snake_case : List[str] = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( __A : Optional[int] , __A : Any ) -> Optional[Any]:
'''simple docstring'''
inspect_metric(__A , __A )
snake_case : Any = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Tuple , __A : Dict , __A : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Tuple , __A : Any , __A : List[str] ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( __A : Any , __A : Dict ) -> Dict:
'''simple docstring'''
snake_case : int = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[Any] , __A : Dict , __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
snake_case : Any = expected_configs[0]
assert expected_config in infos
snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = get_dataset_infos(__A )
assert expected_config in infos
snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Optional[int] , __A : Any , __A : Dict ) -> int:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
| 36 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
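

# Usage sketch (illustrative, not part of the original file):
#
#   config = Swin2SRConfig(upscale=4)  # defaults above, with 4x upscaling
#   config.model_type                  # "swin2sr"
#   config.hidden_size                 # 180, resolved to embed_dim via attribute_map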
| 143 |
import qiskit
def half_adder(bit0: int, bit1: int):
    """Build and run a quantum half adder for the two input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
UpperCamelCase__ = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 143 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info of the current git repository to git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed for numpy and torch (and CUDA, if available)."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
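

if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original file): a minimal
    # namespace carrying the fields init_gpu_params() and set_seed() expect for a
    # CPU-only run (n_gpu=0 takes the early-return branch above).
    from types import SimpleNamespace

    params = SimpleNamespace(n_gpu=0, local_rank=-1, seed=42)
    init_gpu_params(params)
    set_seed(params)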
| 395 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Tuple = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[str] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
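
# Note (illustrative, not part of the original file): with this _LazyModule
# pattern, submodules are imported only on first attribute access, so importing
# just XLNetConfig does not pull in the torch- or TensorFlow-backed classes.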
| 637 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 721 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="Number of processes to use for the preprocessing."
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
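

# Note (illustrative, not part of the original file): d_inputs, allocated further
# down, holds the three device input buffers in engine binding order (input_ids,
# attention_mask, token_type_ids); h_output0/h_output1 are the pagelocked host
# buffers that receive the start and end logits copied back from the device.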
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left, which would make truncation
    # of the context fail; strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None

    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 447 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase__ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
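
# Example invocation (illustrative; the script name is assumed, not from the original file):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small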
| 75 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
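

# Illustrative examples (not part of the original file):
#   prime_sieve(10) -> [2, 3, 5, 7]
#   prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23]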
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 169 | 0 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        """Propagate the input through both hidden layers to the output node."""
        # layer_between_input_and_first_hidden_layer is the layer connecting the
        # input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Update the weight matrices by gradient descent on the squared error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic activation, applied element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of its output value."""
    return (value) * (1 - (value))
def example() -> int:
    """Train on the truth table below and predict the output for [1, 1, 1]."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
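

# Note (illustrative, not part of the original file): the target for [1, 1, 1] in
# the truth table above is 1, but with only 10 training iterations and random
# initial weights the prediction can still vary from run to run.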
if __name__ == "__main__":
example()
| 135 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 135 | 1 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ):
    """simple docstring"""
    def decorator( func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func
    return decorator
def mark_multiple( *keys ):
    """simple docstring"""
    def decorator( func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func
    return decorator
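# These two decorators only attach metadata: they append the given key(s) to a
# `handle_key` list on the decorated function so the KeyHandler metaclass below
# can build its key -> handler dispatch table.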
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        """simple docstring"""
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
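# Illustrative usage (a sketch; everything except mark/register is assumed):
#
# @register
# class Menu:
#     @mark("j")
#     def move_down(cls):
#         ...
#
# After registration the metaclass has filled Menu.key_handler, and the
# handle_input hook reads one character via get_character() and dispatches to
# the method marked with that key.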
| 554 |
"""simple docstring"""
class Things:
    def __init__( self , name , value , weight ):
        """simple docstring"""
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        """simple docstring"""
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value( self ):
        """simple docstring"""
        return self.value
    def get_name( self ):
        """simple docstring"""
        return self.name
    def get_weight( self ):
        """simple docstring"""
        return self.weight
    def value_weight( self ):
        """simple docstring"""
        return self.value / self.weight
def build_menu( name , value , weight ):
    """simple docstring"""
    menu = []
    for i in range(len(name ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( items , max_cost , key_func ):
    """simple docstring"""
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
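# Illustrative usage (a sketch, not part of the original module):
# foods = build_menu(["burger", "pizza", "coke"], [80, 100, 30], [40, 60, 10])
# taken, value = greedy(foods, 60.0, Things.get_value)
# Greedy-by-value is a heuristic: it fills the budget in descending key order
# and does not guarantee the optimal 0/1-knapsack solution.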
def lowerCamelCase__ ( )-> Dict:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 554 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a= logging.get_logger(__name__)
a= {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowercase ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''vit_msn'''
def __init__( self , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1E-0_6 , _lowerCamelCase=2_2_4 , _lowerCamelCase=1_6 , _lowerCamelCase=3 , _lowerCamelCase=True , **_lowerCamelCase , ):
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = hidden_size
__UpperCamelCase : Any = num_hidden_layers
__UpperCamelCase : List[str] = num_attention_heads
__UpperCamelCase : Optional[Any] = intermediate_size
__UpperCamelCase : Optional[Any] = hidden_act
__UpperCamelCase : int = hidden_dropout_prob
__UpperCamelCase : int = attention_probs_dropout_prob
__UpperCamelCase : Optional[Any] = initializer_range
__UpperCamelCase : int = layer_norm_eps
__UpperCamelCase : int = image_size
__UpperCamelCase : List[str] = patch_size
__UpperCamelCase : int = num_channels
__UpperCamelCase : Any = qkv_bias
| 714 |
'''simple docstring'''
class Graph:
    """simple docstring"""
    def __init__( self ):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n' )
    def get_edges( self ):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
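    # Example (illustrative): Graph.build(vertices=[1, 2, 3],
    # edges=[(1, 2, 5), (2, 3, 4)]) yields an undirected weighted graph whose
    # adjacency stores each edge under both endpoints.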
    class UnionFind:
        """simple docstring"""
        def __init__( self ):
            self.parent = {}
            self.rank = {}
        def __len__( self ):
            return len(self.parent )
        def make_set( self , item ):
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , itema , itemb ):
            roota = self.find(itema )
            rootb = self.find(itemb )
            if roota == rootb:
                return roota
            if self.rank[roota] > self.rank[rootb]:
                self.parent[rootb] = roota
                return roota
            if self.rank[roota] < self.rank[rootb]:
                self.parent[roota] = rootb
                return rootb
            if self.rank[roota] == self.rank[rootb]:
                self.rank[roota] += 1
                self.parent[rootb] = roota
                return roota
            return None
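    # Borůvka's algorithm (boruvka_mst below) builds a minimum spanning tree:
    # each round picks the cheapest edge leaving every component and unions the
    # two endpoint components, so the component count shrinks every round.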
    @staticmethod
    def boruvka_mst( graph ):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
| 287 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "vivit"
def __init__( self : str , __snake_case : Tuple=2_2_4 , __snake_case : Union[str, Any]=3_2 , __snake_case : Optional[int]=[2, 1_6, 1_6] , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=7_6_8 , __snake_case : List[str]=1_2 , __snake_case : int=1_2 , __snake_case : Optional[int]=3_0_7_2 , __snake_case : Tuple="gelu_fast" , __snake_case : Union[str, Any]=0.0 , __snake_case : Tuple=0.0 , __snake_case : Optional[Any]=0.02 , __snake_case : str=1E-06 , __snake_case : Union[str, Any]=True , **__snake_case : Optional[int] , ) -> List[str]:
__magic_name__: Optional[Any] = hidden_size
__magic_name__: int = num_hidden_layers
__magic_name__: Optional[Any] = num_attention_heads
__magic_name__: Optional[int] = intermediate_size
__magic_name__: Dict = hidden_act
__magic_name__: Optional[Any] = hidden_dropout_prob
__magic_name__: int = attention_probs_dropout_prob
__magic_name__: Dict = initializer_range
__magic_name__: Optional[Any] = layer_norm_eps
__magic_name__: Tuple = image_size
__magic_name__: List[Any] = num_frames
__magic_name__: Any = tubelet_size
__magic_name__: List[str] = num_channels
__magic_name__: Union[str, Any] = qkv_bias
super().__init__(**__snake_case )
| 96 |
"""simple docstring"""
def solution( n: int = 1_0_0 ) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
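# A closed-form check (added sketch, not in the original file): the loop above
# is equivalent to using sum = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6.
def solution_closed_form(n: int = 1_0_0) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares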
if __name__ == "__main__":
print(f'''{solution() = }''')
| 96 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=0.1 , snake_case_=0.1 , snake_case_=2_2_4 , snake_case_=1_0_0_0 , snake_case_=[3, 3, 6, 4] , snake_case_=[4_8, 5_6, 1_1_2, 2_2_0] , ):
"""simple docstring"""
A_ : Any = parent
A_ : str = batch_size
A_ : Any = num_channels
A_ : int = is_training
A_ : int = use_labels
A_ : Any = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : str = num_labels
A_ : int = image_size
A_ : Optional[int] = layer_depths
A_ : Union[str, Any] = embed_dims
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ):
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=snake_case_ , layer_scale_init_value=1E-5 , )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : str = SwiftFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
A_ : Any = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : str = self.num_labels
A_ : Tuple = SwiftFormerForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
A_ : Optional[Any] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
A_ : Tuple = SwiftFormerForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ : Any = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase_ : Optional[Any] = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase_ : Tuple = False
lowercase_ : List[str] = False
lowercase_ : Optional[Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = SwiftFormerModelTester(self )
A_ : int = ConfigTester(
self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(snake_case_ )
A_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(snake_case_ )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : str = [*signature.parameters.keys()]
A_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = SwiftFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
A_ : List[Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
A_ : str = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
A_ : List[str] = outputs.hidden_states
A_ : List[Any] = 8
self.assertEqual(len(snake_case_ ) , snake_case_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(snake_case_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Dict = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
def _config_zero_init(snake_case_ ):
A_ : Optional[Any] = copy.deepcopy(snake_case_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(snake_case_ , snake_case_ , 1E-10 )
if isinstance(getattr(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ):
A_ : int = _config_zero_init(getattr(snake_case_ , snake_case_ ) )
setattr(snake_case_ , snake_case_ , snake_case_ )
return configs_no_init
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = _config_zero_init(snake_case_ )
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(config=snake_case_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(snake_case_ )
A_ : Any = self.default_image_processor
A_ : int = prepare_img()
A_ : List[str] = image_processor(images=snake_case_ , return_tensors='pt' ).to(snake_case_ )
# forward pass
with torch.no_grad():
A_ : Optional[Any] = model(**snake_case_ )
# verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 708 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.floataa )
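# The construction above follows the cumulative-product identity
# alpha_bar(t) = prod_i (1 - beta_i): each beta_i is recovered as
# 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at `max_beta` (0.999 here)
# for numerical stability near t = 1.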
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : List[Any] = [e.name for e in KarrasDiffusionSchedulers]
lowercase_ : List[Any] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.0_00_85 , snake_case_ = 0.0_12 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = False , snake_case_ = False , snake_case_ = 1.0 , snake_case_ = "linspace" , snake_case_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
A_ : Optional[int] = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
A_ : List[Any] = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A_ : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A_ : List[str] = betas_for_alpha_bar(snake_case_ , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A_ : Union[str, Any] = betas_for_alpha_bar(snake_case_ , alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
A_ : Dict = 1.0 - self.betas
A_ : Any = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
A_ : List[Any] = use_karras_sigmas
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None ):
"""simple docstring"""
if schedule_timesteps is None:
A_ : List[str] = self.timesteps
A_ : Any = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A_ : Tuple = 1 if len(snake_case_ ) > 1 else 0
else:
A_ : Optional[int] = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
A_ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , ):
"""simple docstring"""
A_ : Optional[int] = self.index_for_timestep(snake_case_ )
A_ : Union[str, Any] = self.sigmas[step_index]
A_ : List[str] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : Tuple = num_inference_steps
A_ : Any = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A_ : Dict = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A_ : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ : str = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A_ : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A_ : Optional[Any] = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
A_ : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A_ : int = np.log(snake_case_ )
A_ : str = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
if self.config.use_karras_sigmas:
A_ : Union[str, Any] = self._convert_to_karras(in_sigmas=snake_case_ , num_inference_steps=self.num_inference_steps )
A_ : Dict = np.array([self._sigma_to_t(snake_case_ , snake_case_ ) for sigma in sigmas] )
A_ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A_ : List[Any] = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
A_ : str = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A_ : int = torch.from_numpy(snake_case_ )
A_ : Dict = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(snake_case_ ).startswith('mps' ):
# mps does not support float64
A_ : int = timesteps.to(snake_case_ , dtype=torch.floataa )
else:
A_ : Tuple = timesteps.to(device=snake_case_ )
# empty dt and derivative
A_ : Optional[int] = None
A_ : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A_ : Any = defaultdict(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[str] = np.log(snake_case_ )
# get distribution
A_ : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A_ : Optional[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A_ : Tuple = low_idx + 1
A_ : Dict = log_sigmas[low_idx]
A_ : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
A_ : Any = (low - log_sigma) / (low - high)
A_ : Optional[int] = np.clip(snake_case_ , 0 , 1 )
# transform interpolation to time range
A_ : str = (1 - w) * low_idx + w * high_idx
A_ : Optional[int] = t.reshape(sigma.shape )
return t
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : float = in_sigmas[-1].item()
A_ : float = in_sigmas[0].item()
A_ : str = 7.0 # 7.0 is the value used in the paper
A_ : str = np.linspace(0 , 1 , snake_case_ )
A_ : List[str] = sigma_min ** (1 / rho)
A_ : int = sigma_max ** (1 / rho)
A_ : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
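    # This follows the sigma schedule of Karras et al. (2022), "Elucidating the
    # Design Space of Diffusion-Based Generative Models": sigmas are spaced
    # uniformly in sigma^(1/rho) between sigma_max and sigma_min, with rho = 7.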
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.dt is None
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ):
"""simple docstring"""
A_ : Dict = self.index_for_timestep(snake_case_ )
# advance index counter by 1
A_ : Tuple = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A_ : Tuple = self.sigmas[step_index]
A_ : List[str] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A_ : str = self.sigmas[step_index - 1]
A_ : Optional[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A_ : Tuple = 0
A_ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A_ : str = sigma_hat if self.state_in_first_order else sigma_next
A_ : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A_ : Tuple = sigma_hat if self.state_in_first_order else sigma_next
A_ : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A_ : Optional[int] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
A_ : Union[str, Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A_ : str = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A_ : str = sigma_next - sigma_hat
# store for 2nd order step
A_ : Optional[Any] = derivative
A_ : Union[str, Any] = dt
A_ : Optional[Any] = sample
else:
# 2. 2nd order / Heun's method
A_ : List[Any] = (sample - pred_original_sample) / sigma_next
A_ : Optional[Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A_ : List[Any] = self.dt
A_ : Optional[Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A_ : List[str] = None
A_ : Tuple = None
A_ : str = None
A_ : Optional[int] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , ):
"""simple docstring"""
A_ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
A_ : str = self.timesteps.to(original_samples.device , dtype=torch.floataa )
A_ : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
A_ : List[str] = self.timesteps.to(original_samples.device )
A_ : int = timesteps.to(original_samples.device )
A_ : Union[str, Any] = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
A_ : int = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A_ : List[Any] = sigma.unsqueeze(-1 )
A_ : List[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
        return self.config.num_train_timesteps
| 302 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
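# Illustrative usage (a sketch; variable names are assumed):
# processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `batch` then carries input_ids/attention_mask from the tokenizer plus
# pixel_values from the image processor.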
| 9 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
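    # The sigmoid head outputs a probability in [0, 1]; threshold it rather than
    # comparing against exact 0 or 1.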
    if result[0][0] <= 0.5:
        prediction = '''Normal'''
    else:
        prediction = '''Abnormality detected'''
| 40 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 105 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __A( __UpperCAmelCase ):
    def setUp( self ):
        """simple docstring"""
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
    def _setup_pt_ckpt( self , model_path ):
        """simple docstring"""
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        """simple docstring"""
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model, A )
self.assertEqual(A, A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A, A )
self.assertEqual(A, A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A, A )
self.assertEqual(A, A )
def _UpperCamelCase ( self ):
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A )
self.assertEqual(A, self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A )
self.assertEqual(A, self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(A ):
_UpperCamelCase = FeaturesManager.determine_framework(A )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_tf_available''', A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A, self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_torch_available''', A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A, self.framework_tf )
# Both in environment -> use PyTorch
_UpperCamelCase = MagicMock(return_value=A )
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_tf_available''', A ), patch(
'''transformers.onnx.features.is_torch_available''', A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A, self.framework_pt )
# Both not in environment -> raise error
_UpperCamelCase = MagicMock(return_value=A )
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_tf_available''', A ), patch(
'''transformers.onnx.features.is_torch_available''', A ):
with self.assertRaises(A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
| 105 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( a_ ):
"""simple docstring"""
UpperCAmelCase__ = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
def snake_case ( self : Tuple ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def snake_case ( self : str ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def snake_case ( self : Dict ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def snake_case ( self : List[Any] ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=__UpperCamelCase )
def snake_case ( self : Tuple ):
"""simple docstring"""
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1e-5
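        # These reference values are the "fixed_small" posterior variances
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t for
        # the default linear beta schedule.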
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__UpperCamelCase )
_lowercase = len(__UpperCamelCase )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter
_lowercase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_lowercase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_lowercase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowercase = pred_prev_sample
_lowercase = torch.sum(torch.abs(__UpperCamelCase ) )
_lowercase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config(prediction_type="v_prediction" )
_lowercase = scheduler_class(**__UpperCamelCase )
_lowercase = len(__UpperCamelCase )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter
_lowercase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_lowercase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_lowercase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowercase = pred_prev_sample
_lowercase = torch.sum(torch.abs(__UpperCamelCase ) )
_lowercase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__UpperCamelCase )
_lowercase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
_lowercase = scheduler.timesteps
for i, timestep in enumerate(__UpperCamelCase ):
if i == len(__UpperCamelCase ) - 1:
_lowercase = -1
else:
_lowercase = timesteps[i + 1]
_lowercase = scheduler.previous_timestep(__UpperCamelCase )
_lowercase = prev_t.item()
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def snake_case ( self : int ):
"""simple docstring"""
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__UpperCamelCase )
_lowercase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(__UpperCamelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__UpperCamelCase )
_lowercase = [1_0_0, 8_7, 5_0, 1, 0]
_lowercase = len(__UpperCamelCase )
with self.assertRaises(__UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase )
    def test_custom_timesteps_too_large( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 497 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : List[Any] = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = ['''OwlViTFeatureExtractor''']
lowerCamelCase : str = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367 | 0 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class a__ :
"""simple docstring"""
__UpperCamelCase : float
__UpperCamelCase : TreeNode | None = None
__UpperCamelCase : TreeNode | None = None
def is_binary_search_tree( node ):
    # Validation
    def is_valid_tree( node ) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)
    if not is_valid_tree(node):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''')
    def is_binary_search_tree_recursive_check(
        node, left_bound, right_bound) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )
    return is_binary_search_tree_recursive_check(node, -float('''inf'''), float('''inf'''))
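# The recursive check threads exclusive (left_bound, right_bound) limits down
# the tree; a node is valid only if its value lies strictly between them, which
# is exactly the binary-search-tree invariant.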
if __name__ == "__main__":
import doctest
doctest.testmod()
| 474 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["""gpt2"""]
TINY_MODEL_CHECKPOINT = """gpt2"""
if is_tf_available():
    class ModelToSave( tf.Module ):
        """simple docstring"""
        def __init__(self , tokenizer ):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
        def serving(self , text ):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['''input_ids'''].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['''logits''']
            return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().setUp()
__lowerCAmelCase = [GPTaTokenizer.from_pretrained(__lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowerCAmelCase = [TFGPTaTokenizer.from_pretrained(__lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCAmelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowerCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _snake_case (self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowerCAmelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__lowerCAmelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCAmelCase = python_outputs[key].numpy()
__lowerCAmelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__lowercase , tf.intaa ) == tf_outputs_values ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = tf.function(__lowercase )
for test_inputs in self.test_sentences:
__lowerCAmelCase = tf.constant(__lowercase )
__lowerCAmelCase = compiled_tokenizer(__lowercase )
__lowerCAmelCase = tf_tokenizer(__lowercase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = ModelToSave(tokenizer=__lowercase )
__lowerCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCAmelCase = model.serving(__lowercase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCAmelCase = Path(__lowercase ) / '''saved.model'''
tf.saved_model.save(__lowercase , __lowercase , signatures={'''serving_default''': model.serving} )
__lowerCAmelCase = tf.saved_model.load(__lowercase )
__lowerCAmelCase = loaded_model.signatures['''serving_default'''](__lowercase )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCAmelCase = tf_tokenizer(__lowercase ) # Build model with some sample inputs
__lowerCAmelCase = tf_tokenizer.get_config()
__lowerCAmelCase = TFGPTaTokenizer.from_config(__lowercase )
__lowerCAmelCase = model_from_config(__lowercase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCAmelCase = 12_31_23
for max_length in [3, 5, 10_24]:
__lowerCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCAmelCase = tf_tokenizer(__lowercase , max_length=__lowercase )
__lowerCAmelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 474 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 604 | def solution(length: int = 50) -> int:
    """
    Count the ways a row of the given length can be filled with coloured tiles of
    length 2 (red), 3 (green) or 4 (blue), one colour at a time (Project Euler 116).

    >>> solution(5)
    12
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F"{solution() = }")
| 604 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    """
    Given the coefficients a, b and c, return both roots of ax^2 + bx + c = 0.

    >>> quadratic_roots(a=1, b=3, c=-4)
    (1.0, -4.0)
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"""The solutions are: {solution_1} and {solution_2}""")
if __name__ == "__main__":
main() | 26 |
def solution(n: int = 1_000) -> int:
    """
    Return the product a*b*c of the Pythagorean triplet with a + b + c = n,
    or -1 if no such triplet exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }") | 26 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})["content"]
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 357 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 0 |
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
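    # counts[i] now holds the corpus frequency of token id i (0 for ids never observed);
    # the distillation scripts use it to smooth the MLM masking probabilities.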
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 712 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Depth-first search returning the size of the subtree rooted at `start`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run DFS from the root (node 1) so that `cuts` collects even-sized subtrees."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
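    # Every node in `cuts` roots an even-sized subtree. The root of the whole tree always
    # qualifies, so the number of removable edges is len(cuts) - 1.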
print(len(cuts) - 1)
| 593 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
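# Each _LazyAutoMapping lazily pairs a config class (via CONFIG_MAPPING_NAMES) with the
# Flax model class named above, importing the concrete model module only on first access.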
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 88 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
UpperCAmelCase = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
UpperCAmelCase = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
UpperCAmelCase = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
UpperCAmelCase = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
UpperCAmelCase = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_MAPPING
UpperCAmelCase = auto_class_update(FlaxAutoModel)
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class lowercase__ ( _BaseAutoModelClass ):
__UpperCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 88 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCAmelCase__
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 604 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether top-k / top-p logit filtering behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ],  # cumulative prob of 5 highest values <= 0.6
        ], dtype=tf.float32)
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase__ = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
lowerCAmelCase__ = 14
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 'Hello, my dog is cute and'
lowerCAmelCase__ = tokenizer(a_ ,return_tensors='tf' )
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
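        # With the seed fixed below, sampling is reproducible, so the generated length is deterministic.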
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = 'Hugging Face is a technology company based in New York and Paris.'
lowerCAmelCase__ = bart_tokenizer(a_ ,return_tensors='tf' ).input_ids
lowerCAmelCase__ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
class __snake_case ( SCREAMING_SNAKE_CASE ):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
lowerCAmelCase__ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ,foo='bar' ).numpy()
self.assertTrue(np.array_equal(a_ ,a_ ) )
class __snake_case ( bart_model.model.encoder.__class__ ):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
lowerCAmelCase__ = FakeEncoder(bart_model.config ,bart_model.model.shared )
lowerCAmelCase__ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
with self.assertRaises(a_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a_ ,foo='bar' )
| 604 | 1 |
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.

    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
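# Heap's algorithm visits all n! orderings while swapping only a single pair of
# elements between consecutive permutations.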
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 460 |
"""Flax safety checker used by Stable Diffusion pipelines to flag NSFW images."""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
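# Note: despite the name, jax_cosine_distance returns cosine *similarities*: entry [i, j]
# is the dot product of the L2-normalised embeddings emb_1[i] and emb_2[j].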
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor with random values
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 460 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
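# Note: `bpe` below repeatedly merges the pair with the lowest merge rank and recomputes
# get_pairs after every merge, so this helper runs once per merge step.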
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into a list of BPE tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = token.lower()
return self.encoder.get(lowerCamelCase ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : int ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase ,self.unk_token )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """ """.join(lowerCamelCase ).replace("""@@ """ ,"""""" ).strip()
return out_string
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
    '''simple docstring'''
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
    merge_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
    index = 0
    with open(merge_file, "w", encoding="utf-8") as writer:
        writer.write("#version: 0.2\n")
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                    " Please check that the tokenizer is not corrupted!")
                index = token_index
            writer.write(" ".join(bpe_tokens) + "\n")
            index += 1
    return vocab_file, merge_file
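# Hedged usage sketch (assumed paths and behavior, not from the source; `__a` is the
# BPE tokenizer class defined above):
# tok = __a(vocab_file="vocab.json", merges_file="merges.txt")
# ids = tok("sample text")["input_ids"]   # whitespace split -> BPE merges -> vocab lookup
# text = tok.decode(ids)                  # "@@ " continuation markers are stripped on decode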
| 13 |
'''simple docstring'''
def is_palindrome( num: int ) -> bool:
    '''simple docstring'''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
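# Quick demonstration (assumed values, for illustration only):
print(is_palindrome(121))   # True: the digits read the same reversed
print(is_palindrome(123))   # False: reversal yields 321
print(is_palindrome(-121))  # False: negative numbers are rejected up front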
| 13 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed( seed: int ):
    '''simple docstring'''
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
class EMAModel :
'''simple docstring'''
def __init__( self , _snake_case , _snake_case = 0.9999 , _snake_case = 0.0 , _snake_case = 0 , _snake_case = False , _snake_case = 1.0 , _snake_case = 2 / 3 , _snake_case = None , _snake_case = None , **_snake_case , ) -> Optional[Any]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Module ):
UpperCAmelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , SCREAMING_SNAKE_CASE_ , standard_warn=SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
UpperCAmelCase = True
if kwargs.get('''max_value''' , SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , SCREAMING_SNAKE_CASE_ , standard_warn=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = kwargs['''max_value''']
if kwargs.get('''min_value''' , SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , SCREAMING_SNAKE_CASE_ , standard_warn=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = kwargs['''min_value''']
UpperCAmelCase = list(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , SCREAMING_SNAKE_CASE_ , standard_warn=SCREAMING_SNAKE_CASE_ )
self.to(device=kwargs['''device'''] )
UpperCAmelCase = None
UpperCAmelCase = decay
UpperCAmelCase = min_decay
UpperCAmelCase = update_after_step
UpperCAmelCase = use_ema_warmup
UpperCAmelCase = inv_gamma
UpperCAmelCase = power
UpperCAmelCase = 0
UpperCAmelCase = None # set in `step()`
UpperCAmelCase = model_cls
UpperCAmelCase = model_config
@classmethod
def snake_case_ ( cls , _snake_case , _snake_case ) -> "EMAModel":
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = model_cls.load_config(SCREAMING_SNAKE_CASE_ , return_unused_kwargs=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = model_cls.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = cls(model.parameters() , model_cls=SCREAMING_SNAKE_CASE_ , model_config=model.config )
ema_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
return ema_model
def snake_case_ ( self , _snake_case ) -> List[Any]:
"""simple docstring"""
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
UpperCAmelCase = self.model_cls.from_config(self.model_config )
UpperCAmelCase = self.state_dict()
state_dict.pop('''shadow_params''' , SCREAMING_SNAKE_CASE_ )
model.register_to_config(**SCREAMING_SNAKE_CASE_ )
self.copy_to(model.parameters() )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self , _snake_case ) -> float:
"""simple docstring"""
UpperCAmelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCAmelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCAmelCase = (1 + step) / (10 + step)
UpperCAmelCase = min(SCREAMING_SNAKE_CASE_ , self.decay )
# make sure decay is not smaller than min_decay
UpperCAmelCase = max(SCREAMING_SNAKE_CASE_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def snake_case_ ( self , _snake_case ) -> Tuple:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Module ):
UpperCAmelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , SCREAMING_SNAKE_CASE_ , standard_warn=SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase = parameters.parameters()
UpperCAmelCase = list(SCREAMING_SNAKE_CASE_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
UpperCAmelCase = self.get_decay(self.optimization_step )
UpperCAmelCase = decay
UpperCAmelCase = 1 - decay
UpperCAmelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , SCREAMING_SNAKE_CASE_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
UpperCAmelCase = deepspeed.zero.GatheredParameters(SCREAMING_SNAKE_CASE_ , modifier_rank=SCREAMING_SNAKE_CASE_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase = list(SCREAMING_SNAKE_CASE_ )
for s_param, param in zip(self.shadow_params , SCREAMING_SNAKE_CASE_ ):
param.data.copy_(s_param.to(param.device ).data )
def snake_case_ ( self , _snake_case=None , _snake_case=None ) -> None:
"""simple docstring"""
UpperCAmelCase = [
p.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) if p.is_floating_point() else p.to(device=SCREAMING_SNAKE_CASE_ )
for p in self.shadow_params
]
def snake_case_ ( self ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def snake_case_ ( self , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase = [param.detach().cpu().clone() for param in parameters]
def snake_case_ ( self , _snake_case ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , SCREAMING_SNAKE_CASE_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
UpperCAmelCase = None
def snake_case_ ( self , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
UpperCAmelCase = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Invalid min_decay''' )
UpperCAmelCase = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Invalid optimization_step''' )
UpperCAmelCase = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Invalid update_after_step''' )
UpperCAmelCase = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Invalid use_ema_warmup''' )
UpperCAmelCase = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
UpperCAmelCase = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
UpperCAmelCase = state_dict.get('''shadow_params''' , SCREAMING_SNAKE_CASE_ )
if shadow_params is not None:
UpperCAmelCase = shadow_params
if not isinstance(self.shadow_params , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
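# Standalone sketch (assumed values, plain torch only) of the shadow-parameter update
# s <- s - (1 - decay) * (s - p) that EMAModel.step() above applies per parameter.
import torch

decay = 0.999
param = torch.zeros(3)
shadow = param.clone()
for _ in range(1_000):
    param += 0.01                                # stand-in for an optimizer step
    shadow.sub_((1 - decay) * (shadow - param))  # exponential moving average update
print(param, shadow)  # the shadow copy lags behind, smoothing the trajectory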
| 254 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class OpenAIGPTConfig( PretrainedConfig ):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
def __init__(self : List[Any] ,SCREAMING_SNAKE_CASE_ : str=40_478 ,SCREAMING_SNAKE_CASE_ : Optional[int]=512 ,SCREAMING_SNAKE_CASE_ : Union[str, Any]=768 ,SCREAMING_SNAKE_CASE_ : Dict=12 ,SCREAMING_SNAKE_CASE_ : Dict=12 ,SCREAMING_SNAKE_CASE_ : int="gelu" ,SCREAMING_SNAKE_CASE_ : Dict=0.1 ,SCREAMING_SNAKE_CASE_ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE_ : Dict=0.1 ,SCREAMING_SNAKE_CASE_ : Optional[Any]=1e-5 ,SCREAMING_SNAKE_CASE_ : List[str]=0.02 ,SCREAMING_SNAKE_CASE_ : Union[str, Any]="cls_index" ,SCREAMING_SNAKE_CASE_ : str=True ,SCREAMING_SNAKE_CASE_ : Any=None ,SCREAMING_SNAKE_CASE_ : List[Any]=True ,SCREAMING_SNAKE_CASE_ : List[str]=0.1 ,**SCREAMING_SNAKE_CASE_ : int ,) -> int:
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = afn
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = summary_type
lowerCAmelCase = summary_use_proj
lowerCAmelCase = summary_activation
lowerCAmelCase = summary_first_dropout
lowerCAmelCase = summary_proj_to_labels
super().__init__(**SCREAMING_SNAKE_CASE_ )
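# Hedged usage sketch (assumed values, not from the source file):
# cfg = OpenAIGPTConfig(n_layer=6, n_head=8)
# print(cfg.num_hidden_layers)  # -> 6, resolved through attribute_map above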
| 535 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self , snake_case , snake_case) -> Any:
'''simple docstring'''
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
_UpperCAmelCase : Optional[Any] ={k: v for k, v in vars(snake_case).items() if k != '''container'''}
_UpperCAmelCase : Union[str, Any] ={k: v for k, v in vars(snake_case).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , snake_case) and yy.get('choices' , snake_case):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](snake_case) , yy['type'](snake_case))
del xx["type"], yy["type"]
self.assertEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] =HfArgumentParser(snake_case)
_UpperCAmelCase : Any =argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case , required=snake_case)
expected.add_argument('--bar' , type=snake_case , required=snake_case)
expected.add_argument('--baz' , type=snake_case , required=snake_case)
expected.add_argument('--flag' , type=snake_case , default=snake_case , const=snake_case , nargs='?')
self.argparsersEqual(snake_case , snake_case)
_UpperCAmelCase : Optional[Any] =['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
(example,) =parser.parse_args_into_dataclasses(snake_case , look_for_args_file=snake_case)
self.assertFalse(example.flag)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =HfArgumentParser(snake_case)
_UpperCAmelCase : str =argparse.ArgumentParser()
expected.add_argument('--foo' , default=4_2 , type=snake_case)
expected.add_argument('--baz' , default='toto' , type=snake_case , help='help message')
self.argparsersEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict =argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case , default=snake_case , const=snake_case , nargs='?')
expected.add_argument('--baz' , type=snake_case , default=snake_case , const=snake_case , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=snake_case , dest='baz')
expected.add_argument('--opt' , type=snake_case , default=snake_case)
_UpperCAmelCase : Optional[int] =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case)
for dataclass_type in dataclass_types:
_UpperCAmelCase : Any =HfArgumentParser(snake_case)
self.argparsersEqual(snake_case , snake_case)
_UpperCAmelCase : Any =parser.parse_args([])
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case))
_UpperCAmelCase : Optional[Any] =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case))
_UpperCAmelCase : Optional[Any] =parser.parse_args(['--foo', '--baz'])
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case))
_UpperCAmelCase : int =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case))
_UpperCAmelCase : Optional[int] =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case))
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] =HfArgumentParser(snake_case)
_UpperCAmelCase : Optional[Any] =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 4_2] , type=make_choice_type_function(['titi', 'toto', 4_2]) , )
self.argparsersEqual(snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
_UpperCAmelCase : Dict =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
_UpperCAmelCase : Union[str, Any] =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
_UpperCAmelCase : Union[str, Any] =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
_UpperCAmelCase : int =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 4_2)
_UpperCAmelCase : List[Any] =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
@dataclass
class __magic_name__ :
UpperCAmelCase ="toto"
_UpperCAmelCase : int =HfArgumentParser(snake_case)
_UpperCAmelCase : Optional[Any] =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 4_2) , type=make_choice_type_function(['titi', 'toto', 4_2]) , )
self.argparsersEqual(snake_case , snake_case)
_UpperCAmelCase : str =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
_UpperCAmelCase : Tuple =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
_UpperCAmelCase : str =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 4_2)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCAmelCase : str =HfArgumentParser(snake_case)
_UpperCAmelCase : List[str] =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=snake_case)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=snake_case)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=snake_case)
self.argparsersEqual(snake_case , snake_case)
_UpperCAmelCase : Optional[Any] =parser.parse_args([])
self.assertEqual(
snake_case , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
_UpperCAmelCase : List[str] =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(snake_case , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] =argparse.ArgumentParser()
expected.add_argument('--foo' , default=snake_case , type=snake_case)
expected.add_argument('--bar' , default=snake_case , type=snake_case , help='help message')
expected.add_argument('--baz' , default=snake_case , type=snake_case)
expected.add_argument('--ces' , nargs='+' , default=[] , type=snake_case)
expected.add_argument('--des' , nargs='+' , default=[] , type=snake_case)
_UpperCAmelCase : int =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case)
for dataclass_type in dataclass_types:
_UpperCAmelCase : Union[str, Any] =HfArgumentParser(snake_case)
self.argparsersEqual(snake_case , snake_case)
_UpperCAmelCase : Any =parser.parse_args([])
self.assertEqual(snake_case , Namespace(foo=snake_case , bar=snake_case , baz=snake_case , ces=[] , des=[]))
_UpperCAmelCase : Tuple =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(snake_case , Namespace(foo=1_2 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : str =HfArgumentParser(snake_case)
_UpperCAmelCase : Dict =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=snake_case , required=snake_case)
expected.add_argument('--required_str' , type=snake_case , required=snake_case)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=snake_case , )
self.argparsersEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =HfArgumentParser(snake_case)
_UpperCAmelCase : Union[str, Any] =argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case , required=snake_case)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=snake_case , )
expected.add_argument('--opt' , type=snake_case , default=snake_case)
expected.add_argument('--baz' , default='toto' , type=snake_case , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case)
self.argparsersEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int =HfArgumentParser(snake_case)
_UpperCAmelCase : List[Any] ={
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
_UpperCAmelCase : Tuple =parser.parse_dict(snake_case)[0]
_UpperCAmelCase : Optional[Any] =BasicExample(**snake_case)
self.assertEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =HfArgumentParser(snake_case)
_UpperCAmelCase : Tuple ={
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(snake_case , parser.parse_dict , snake_case , allow_extra_keys=snake_case)
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =HfArgumentParser(snake_case)
_UpperCAmelCase : Tuple ={
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : int =os.path.join(snake_case , 'temp_json')
os.mkdir(snake_case)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =parser.parse_json_file(Path(temp_local_path + '.json'))[0]
_UpperCAmelCase : str =BasicExample(**snake_case)
self.assertEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str =HfArgumentParser(snake_case)
_UpperCAmelCase : Dict ={
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Union[str, Any] =os.path.join(snake_case , 'temp_yaml')
os.mkdir(snake_case)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
_UpperCAmelCase : str =BasicExample(**snake_case)
self.assertEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =HfArgumentParser(snake_case)
self.assertIsNotNone(snake_case)
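# Minimal self-contained sketch (assumed example, not part of the test file above)
# of the dataclass-to-CLI mapping these tests exercise.
from dataclasses import dataclass
from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    foo: int = 1
    baz: str = "toto"

demo_parser = HfArgumentParser(DemoArgs)
(demo_args,) = demo_parser.parse_args_into_dataclasses(["--foo", "2"])
print(demo_args.foo, demo_args.baz)  # 2 toto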
| 708 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
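# Worked example (assumed values): with scores = [3, 5, 2, 9] the height is
# math.log(4, 2) = 2; the minimizer returns min(3, 5) = 3 and min(2, 9) = 2,
# so the maximizer at the root picks max(3, 2) = 3.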
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 331 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _get_uniform_logits( self , batch_size , length ):
    scores = jnp.ones((batch_size, length) ) / length
    return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
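# Standalone sketch (assumed values) of the temperature warping exercised above:
# dividing logits by t < 1 sharpens the softmax, t > 1 flattens it.
import jax
import jax.numpy as jnp

logits = jnp.array([1.0, 2.0, 3.0])
print(jax.nn.softmax(logits / 0.5))  # more peaked than softmax(logits)
print(jax.nn.softmax(logits / 1.3))  # flatter than softmax(logits)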
| 50 |
'''simple docstring'''
def encrypt( input_string: str , key: int ) -> str:
    '''simple docstring'''
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt( input_string: str , key: int ) -> str:
    '''simple docstring'''
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce( input_string: str ) -> dict[int, str]:
    '''simple docstring'''
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
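# Quick demonstration (classic rail-fence example; values assumed):
print(encrypt("WEAREDISCOVEREDFLEEATONCE", 3))  # WECRLTEERDSOEEFEAOCAIVDEN
print(decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3))  # WEAREDISCOVEREDFLEEATONCE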
| 209 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(F'Could not make batched video from {videos}')
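# Shape intuition for make_batched (assumed examples): a single image/array becomes
# [[img]], one video (a list of frames) becomes [video], and a batch of videos
# (a list of lists of frames) is returned unchanged.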
class _snake_case ( BaseImageProcessor ):
model_input_names = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = size if size is not None else {"""shortest_edge""": 2_24}
lowercase__ : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowercase__ : int = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""")
lowercase__ : Optional[Any] = do_resize
lowercase__ : Dict = size
lowercase__ : List[Any] = do_center_crop
lowercase__ : Any = crop_size
lowercase__ : Optional[int] = resample
lowercase__ : Union[str, Any] = do_rescale
lowercase__ : Optional[int] = rescale_factor
lowercase__ : str = do_normalize
lowercase__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : str = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_)
if "shortest_edge" in size:
lowercase__ : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE_)
elif "height" in size and "width" in size:
lowercase__ : Any = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_)
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = to_numpy_array(SCREAMING_SNAKE_CASE_)
if do_resize:
lowercase__ : Tuple = self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_)
if do_center_crop:
lowercase__ : Tuple = self.center_crop(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_)
if do_rescale:
lowercase__ : Tuple = self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_)
if do_normalize:
lowercase__ : List[Any] = self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return image
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : List[str] = do_resize if do_resize is not None else self.do_resize
lowercase__ : Union[str, Any] = resample if resample is not None else self.resample
lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Any = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Dict = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[Any] = image_std if image_std is not None else self.image_std
lowercase__ : List[Any] = size if size is not None else self.size
lowercase__ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = crop_size if crop_size is not None else self.crop_size
lowercase__ : Any = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""")
if not valid_images(SCREAMING_SNAKE_CASE_):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
lowercase__ : Optional[Any] = make_batched(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = [
[
self._preprocess_image(
image=SCREAMING_SNAKE_CASE_ , do_resize=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size=SCREAMING_SNAKE_CASE_ , do_rescale=SCREAMING_SNAKE_CASE_ , rescale_factor=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , )
for img in video
]
for video in videos
]
lowercase__ : str = {"""pixel_values""": videos}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
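# Hedged usage sketch (assumed frames; `_snake_case` is the video processor class above):
# import numpy as np
# processor = _snake_case()
# video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
# batch = processor(video, return_tensors="np")
# print(batch["pixel_values"].shape)  # expected (1, 8, 3, 224, 224) after resize/crop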
| 495 |
lowerCamelCase__ : Any = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCAmelCase =logging.get_logger(__name__)
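# Processor that pairs an AutoTokenizer with optional precomputed speaker embeddings ("voice presets").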
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "AutoTokenizer"
_UpperCamelCase = ["tokenizer"]
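    # expected ndarray rank for each voice-preset component (checked before use)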
_UpperCamelCase = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[int]:
super().__init__(UpperCAmelCase__ )
a_ = speaker_embeddings
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , UpperCAmelCase__ , UpperCAmelCase__="speaker_embeddings_path.json" , **UpperCAmelCase__ ) -> List[Any]:
if speaker_embeddings_dict_path is not None:
a_ = get_file_from_repo(
UpperCAmelCase__ , UpperCAmelCase__ , subfolder=kwargs.pop('subfolder' , UpperCAmelCase__ ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase__ ) , force_download=kwargs.pop('force_download' , UpperCAmelCase__ ) , proxies=kwargs.pop('proxies' , UpperCAmelCase__ ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase__ ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase__ ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase__ ) , revision=kwargs.pop('revision' , UpperCAmelCase__ ) , )
if speaker_embeddings_path is None:
logger.warning(
                    F'''`{os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )}` does not exist,
                    no preloaded speaker embeddings will be used - make sure to provide a correct path to the JSON
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
a_ = None
else:
with open(UpperCAmelCase__ ) as speaker_embeddings_json:
a_ = json.load(UpperCAmelCase__ )
else:
a_ = None
a_ = AutoTokenizer.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
return cls(tokenizer=UpperCAmelCase__ , speaker_embeddings=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__="speaker_embeddings_path.json" , UpperCAmelCase__="speaker_embeddings" , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> Dict:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ , 'v2' ) , exist_ok=UpperCAmelCase__ )
a_ = {}
a_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
a_ = self._load_voice_preset(UpperCAmelCase__ )
a_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase__ , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=UpperCAmelCase__ , )
a_ = os.path.join(UpperCAmelCase__ , F'''{prompt_key}_{key}.npy''' )
a_ = tmp_dict
with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , 'w' ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
super().save_pretrained(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
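    # Load the numpy arrays for a named voice preset, fetching each file from the hub or a local path.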
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = None , **UpperCAmelCase__ ) -> Tuple:
a_ = self.speaker_embeddings[voice_preset]
a_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
a_ = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase__ ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase__ ) , force_download=kwargs.pop('force_download' , UpperCAmelCase__ ) , proxies=kwargs.pop('proxies' , UpperCAmelCase__ ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase__ ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase__ ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase__ ) , revision=kwargs.pop('revision' , UpperCAmelCase__ ) , )
if path is None:
raise ValueError(
                F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist,
                no preloaded voice preset will be used - make sure to provide correct paths to the {voice_preset}
                embeddings.''' )
a_ = np.load(UpperCAmelCase__ )
return voice_preset_dict
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = None ) -> Union[str, Any]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="pt" , UpperCAmelCase__=256 , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=False , **UpperCAmelCase__ , ) -> str:
if voice_preset is not None and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
if (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
a_ = self._load_voice_preset(UpperCAmelCase__ )
else:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not voice_preset.endswith('.npz' ):
a_ = voice_preset + '.npz'
a_ = np.load(UpperCAmelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
a_ = self.tokenizer(
UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , padding='max_length' , max_length=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
if voice_preset is not None:
a_ = voice_preset
return encoded_text
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
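# ONNX export configuration: declares pixel_values with dynamic batch, channel, height and width axes.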
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
a__ : Optional[int] = logging.getLogger(__name__)
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30_522, type=int)
a__ : Dict = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, 'rb') as fp:
a__ : List[Any] = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
a__ : Any = Counter()
for tk_ids in data:
counter.update(tk_ids)
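    # densify the counter into a vocab-sized list; token ids never seen keep a count of 0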
a__ : int = [0] * args.vocab_size
for k, v in counter.items():
a__ : str = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
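# Fast unit tests: tiny randomly initialized UNet/VAE/CLIP components stand in for real checkpoints.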
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableDiffusionInpaintPipeline
_lowerCamelCase =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase =frozenset([] )
def __snake_case ( self : List[str] ):
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
UpperCAmelCase = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase = CLIPTextModel(a__ )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __snake_case ( self : Dict , a__ : List[Any] , a__ : Tuple=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : int ):
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = StableDiffusionInpaintPipeline(**a__ )
UpperCAmelCase = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
UpperCAmelCase = self.get_dummy_inputs(a__ )
UpperCAmelCase = sd_pipe(**a__ ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __snake_case ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Any ):
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __snake_case ( self : int ):
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __snake_case ( self : List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase = PNDMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = [0, 2, 4, 6, 8]
UpperCamelCase__ : List[str] = [1, 3, 5, 7, 9]
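# Counts "reversible" numbers: n + reverse(n) must contain only odd digits. Digits are chosen
# in outer pairs, with `remainder` carrying the partial pairwise sums (cf. Project Euler 145).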
def __UpperCamelCase( _A : int , _A : int , _A : list[int] , _A : int ):
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
UpperCAmelCase__ : Tuple = 0
for digit in range(10 ):
UpperCAmelCase__ : int = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , _A , _A )
return result
UpperCAmelCase__ : List[str] = 0
for digita in range(10 ):
UpperCAmelCase__ : List[str] = digita
if (remainder + digita) % 2 == 0:
UpperCAmelCase__ : Optional[int] = ODD_DIGITS
else:
UpperCAmelCase__ : Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
UpperCAmelCase__ : Dict = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , _A , _A , )
return result
def __UpperCamelCase( _A : int = 9 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(_A , 0 , [0] * length , _A )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCamelCase__ : Dict = logging.getLogger(__name__)
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=_A , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=_A , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=_A , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=_A , default='''data/dump''' , help='''The dump file prefix.''' )
UpperCAmelCase__ : Optional[int] = parser.parse_args()
logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
UpperCAmelCase__ : List[str] = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ : Optional[Any] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
UpperCAmelCase__ : Union[str, Any] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase__ : Union[str, Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ : List[Any] = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
UpperCAmelCase__ : Dict = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase__ : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ : List[Any] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
UpperCAmelCase__ : Optional[Any] = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F'''Loading text from {args.file_path}''' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
UpperCAmelCase__ : List[Any] = fp.readlines()
logger.info('''Start encoding''' )
logger.info(F'''{len(_A )} examples to process.''' )
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Optional[int] = 1_00_00
UpperCAmelCase__ : Tuple = time.time()
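    # wrap each line with the special tokens, encode it, and log throughput every `interval` examples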
for text in data:
UpperCAmelCase__ : Any = F'''{bos} {text.strip()} {sep}'''
UpperCAmelCase__ : int = tokenizer.encode(_A , add_special_tokens=_A )
rslt.append(_A )
iter += 1
if iter % interval == 0:
UpperCAmelCase__ : int = time.time()
logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
UpperCAmelCase__ : Optional[Any] = time.time()
logger.info('''Finished binarization''' )
logger.info(F'''{len(_A )} examples processed.''' )
UpperCAmelCase__ : Dict = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
UpperCAmelCase__ : Dict = tokenizer.vocab_size
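    # store token ids as uint16 when the vocabulary fits in 16 bits, halving the pickle size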
if vocab_size < (1 << 16):
UpperCAmelCase__ : Any = [np.uintaa(_A ) for d in rslt]
else:
UpperCAmelCase__ : str = [np.intaa(_A ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'''Dump to {dp_file}''' )
with open(_A , '''wb''' ) as handle:
pickle.dump(rslt_ , _A , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
UpperCamelCase = Mapping[str, np.ndarray]
UpperCamelCase = Mapping[str, Any] # Is a nested dict.
UpperCamelCase = 0.01
@dataclasses.dataclass(frozen=lowerCAmelCase__ )
class _a :
'''simple docstring'''
lowerCamelCase_ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCamelCase_ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCamelCase_ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCamelCase_ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCamelCase_ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCamelCase_ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCamelCase_ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowerCamelCase_ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowerCamelCase_ : Optional[Sequence[int]] = None
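# Parse a ProteinNet-style text record ([PRIMARY]/[TERTIARY]/[MASK] sections) into a Protein.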
def lowerCamelCase_ ( _lowercase ) -> Protein:
__A : Optional[int] = r"(\[[A-Z]+\]\n)"
__A : List[str] = [tag.strip() for tag in re.split(_lowercase , _lowercase ) if len(_lowercase ) > 0]
__A : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
__A : List[str] = ["N", "CA", "C"]
__A : Any = None
__A : int = None
__A : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
__A : List[str] = g[1][0].strip()
for i in range(len(_lowercase ) ):
if seq[i] not in residue_constants.restypes:
__A : Optional[int] = "X" # FIXME: strings are immutable
__A : str = np.array(
[residue_constants.restype_order.get(_lowercase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__A : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(_lowercase , g[1][axis].split() ) ) )
__A : str = np.array(_lowercase )
__A : Tuple = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_lowercase ):
__A : str = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__A : Dict = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
__A : List[str] = np.zeros(
(
len(_lowercase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_lowercase ):
__A : int = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_lowercase , atom_mask=_lowercase , aatype=_lowercase , residue_index=np.arange(len(_lowercase ) ) , b_factors=_lowercase , )
def lowerCamelCase_ ( _lowercase , _lowercase = 0 ) -> List[str]:
__A : List[str] = []
__A : Any = prot.remark
if remark is not None:
pdb_headers.append(F"REMARK {remark}" )
__A : Dict = prot.parents
__A : List[str] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__A : Optional[int] = [p for i, p in zip(_lowercase , _lowercase ) if i == chain_id]
if parents is None or len(_lowercase ) == 0:
__A : List[Any] = ["N/A"]
pdb_headers.append(F"PARENT {' '.join(_lowercase )}" )
return pdb_headers
def lowerCamelCase_ ( _lowercase , _lowercase ) -> str:
__A : List[str] = []
__A : Optional[int] = pdb_str.split("\n" )
__A : Union[str, Any] = prot.remark
if remark is not None:
out_pdb_lines.append(F"REMARK {remark}" )
__A : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__A : List[Any] = []
if prot.parents_chain_index is not None:
__A : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowercase ) , [] )
parent_dict[str(_lowercase )].append(_lowercase )
__A : Tuple = max([int(_lowercase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__A : List[Any] = parent_dict.get(str(_lowercase ) , ["N/A"] )
parents_per_chain.append(_lowercase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__A : Union[str, Any] = [["N/A"]]
def make_parent_line(_lowercase ) -> str:
return F"PARENT {' '.join(_lowercase )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__A : List[str] = 0
for i, l in enumerate(_lowercase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowercase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowercase ):
__A : Any = parents_per_chain[chain_counter]
else:
__A : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(_lowercase ) )
return "\n".join(_lowercase )
def lowerCamelCase_ ( _lowercase ) -> str:
__A : int = residue_constants.restypes + ["X"]
def res_atoa(_lowercase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
__A : Dict = residue_constants.atom_types
__A : List[str] = []
__A : Any = prot.atom_mask
__A : Dict = prot.aatype
__A : str = prot.atom_positions
__A : Any = prot.residue_index.astype(np.intaa )
__A : Optional[Any] = prot.b_factors
__A : Any = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
__A : Tuple = get_pdb_headers(_lowercase )
if len(_lowercase ) > 0:
pdb_lines.extend(_lowercase )
__A : Any = aatype.shape[0]
__A : int = 1
__A : Union[str, Any] = 0
__A : Union[str, Any] = string.ascii_uppercase
__A : Dict = None
# Add all atom sites.
for i in range(_lowercase ):
__A : Tuple = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__A : Optional[Any] = "ATOM"
__A : List[str] = atom_name if len(_lowercase ) == 4 else F" {atom_name}"
__A : Dict = ""
__A : Optional[Any] = ""
__A : List[str] = 1.00
__A : str = atom_name[0] # Protein supports only C, N, O, S, this works.
__A : Union[str, Any] = ""
__A : str = "A"
if chain_index is not None:
__A : Any = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__A : str = (
F"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
F"{res_name_a:>3} {chain_tag:>1}"
F"{residue_index[i]:>4}{insertion_code:>1} "
F"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
F"{occupancy:>6.2f}{b_factor:>6.2f} "
F"{element:>2}{charge:>2}"
)
pdb_lines.append(_lowercase )
atom_index += 1
__A : Optional[Any] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__A : List[Any] = True
__A : int = chain_index[i + 1]
if should_terminate:
# Close the chain.
__A : Any = "TER"
__A : str = (
F"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(_lowercase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowercase , _lowercase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(_lowercase )
def lowerCamelCase_ ( _lowercase ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Protein:
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_lowercase , remark=_lowercase , parents=_lowercase , parents_chain_index=_lowercase , )
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCamelCase = HfApi()
UpperCamelCase = {}
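# reference output slices (first 30 logits) for each checkpoint, compared against fresh model outputs below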
# fmt: off
UpperCamelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCamelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCamelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCamelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCamelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCamelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCamelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCamelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCamelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCamelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCamelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCamelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCamelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCamelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCamelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
UpperCamelCase = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCamelCase = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('CompVis'):
UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCamelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCamelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCamelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : int = "<s>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
            expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , )
def _snake_case (_snake_case : list[int] , _snake_case : int) -> bool:
_lowercase =len(_snake_case)
_lowercase =[[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # a sum of zero can always be formed by taking no elements,
    # hence subset[i][0] is True for every i
for i in range(arr_len + 1):
_lowercase =True
    # a non-zero sum cannot be formed from an empty set of elements, hence False
for i in range(1 , required_sum + 1):
_lowercase =False
for i in range(1 , arr_len + 1):
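        # subset[i][j] is True iff some subset of the first i elements sums to j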
for j in range(1 , required_sum + 1):
if arr[i - 1] > j:
_lowercase =subset[i - 1][j]
if arr[i - 1] <= j:
_lowercase =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_SCREAMING_SNAKE_CASE = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
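# Keys mapped to the names above live at the top level of the HF model; all others are prefixed with "unispeech_sat.".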
def _snake_case (_snake_case : Dict , _snake_case : List[str] , _snake_case : Any , _snake_case : Tuple , _snake_case : int) -> Dict:
for attribute in key.split('.'):
_lowercase =getattr(_snake_case , _snake_case)
if weight_type is not None:
_lowercase =getattr(_snake_case , _snake_case).shape
else:
_lowercase =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''')
if weight_type == "weight":
_lowercase =value
elif weight_type == "weight_g":
_lowercase =value
elif weight_type == "weight_v":
_lowercase =value
elif weight_type == "bias":
_lowercase =value
else:
_lowercase =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def _snake_case (_snake_case : List[str] , _snake_case : Union[str, Any]) -> List[Any]:
_lowercase =[]
_lowercase =fairseq_model.state_dict()
_lowercase =hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_lowercase =False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == 'group' , )
_lowercase =True
else:
for key, mapped_key in MAPPING.items():
_lowercase ='unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.')[:-1]) != key):
# special case since naming is very similar
continue
_lowercase =True
if "*" in mapped_key:
_lowercase =name.split(_snake_case)[0].split('.')[-2]
_lowercase =mapped_key.replace('*' , _snake_case)
if "weight_g" in name:
_lowercase ='weight_g'
elif "weight_v" in name:
_lowercase ='weight_v'
elif "bias" in name:
_lowercase ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowercase ='weight'
else:
_lowercase =None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case)
continue
if not is_used:
unused_weights.append(_snake_case)
logger.warning(f'''Unused weights: {unused_weights}''')
def _snake_case (_snake_case : Any , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Any , _snake_case : int) -> Optional[Any]:
_lowercase =full_name.split('conv_layers.')[-1]
_lowercase =name.split('.')
_lowercase =int(items[0])
_lowercase =int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
_lowercase =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(_snake_case)
@torch.no_grad()
def _snake_case (_snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Union[str, Any]=None , _snake_case : str=None , _snake_case : Union[str, Any]=True) -> Dict:
if config_path is not None:
_lowercase =UniSpeechSatConfig.from_pretrained(_snake_case)
else:
_lowercase =UniSpeechSatConfig()
_lowercase =''
if is_finetuned:
_lowercase =UniSpeechSatForCTC(_snake_case)
else:
_lowercase =UniSpeechSatForPreTraining(_snake_case)
_lowercase , _lowercase , _lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
_lowercase =model[0].eval()
recursively_load_weights(_snake_case , _snake_case)
hf_wavavec.save_pretrained(_snake_case)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_UpperCamelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
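# Illustrative invocation (the script name and output path are hypothetical examples; the
# checkpoint URL must be one of the two choices registered above):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection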
| 453 |
"""simple docstring"""
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
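# Extra illustrative check (not part of the original file): overlapping occurrences are all
# reported, and the naive scan runs in O(len(s) * len(pattern)) time.
assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]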
if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 453 | 1 |
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
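# Illustrative round-trip check (the key and message are assumed examples, not from the
# original file): encrypting then decrypting with the same key returns the input unchanged.
assert decrypt_message("HELLO", encrypt_message("HELLO", "This is a secret")) == "This is a secret"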
if __name__ == "__main__":
main()
| 708 |
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
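# Illustrative round-trip (assumed inputs, not from the original file); holds whenever
# 1 < key < len(text):
assert decrypt(encrypt("HELLO WORLD", 4), 4) == "HELLO WORLD"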
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
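# Illustrative consumer of this lazy __init__ (assumes the file lives at
# transformers/models/clipseg/__init__.py and that torch is installed, so the modeling
# classes resolve on first attribute access):
#   from transformers.models.clipseg import CLIPSegConfig, CLIPSegProcessor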
| 107 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the foward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
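# Minimal usage sketch (the `image` variable and point coordinates are assumptions; requires
# torch for return_tensors="pt"):
#   from transformers import SamImageProcessor
#   processor = SamProcessor(SamImageProcessor())
#   inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
#   # `inputs` now holds pixel_values plus "input_points" rescaled to the model resolution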
| 211 | 0 |
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
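# Illustrative sanity check: 9110846700 is the widely published answer to Project Euler 48.
assert solution() == "9110846700"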
if __name__ == "__main__":
print(solution())
| 715 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 321 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
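# Illustrative CLI call via fire (file names are hypothetical examples):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin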
| 96 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
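# Illustrative invocation (script name and dump path are hypothetical examples):
#   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny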
| 302 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
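# The @slow generation test above downloads the full 2.7B checkpoint; illustrative opt-in run
# (RUN_SLOW is the standard transformers test switch, the test path is assumed):
#   RUN_SLOW=1 python -m pytest tests/models/gpt_neox_japanese -k generation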
| 387 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
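# Illustrative multi-process launch of this script (the script name is assumed):
#   accelerate launch --num_processes 2 test_ops.py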
| 387 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
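# Each MAPPING_* dict above maps a fairseq parameter path prefix (left) to the corresponding
# HF SpeechT5 attribute path (right); entries containing "*" are expanded with the layer index
# when the state dict is walked below. The IGNORE_KEYS_* lists drop the prenet/postnet weights
# that do not exist for a given task head.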
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Dict ,__UpperCamelCase: List[Any] ,__UpperCamelCase: str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE : int = name.split('.' )
SCREAMING_SNAKE_CASE : Any = int(items[0] )
SCREAMING_SNAKE_CASE : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
SCREAMING_SNAKE_CASE : str = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: int=None ,__UpperCamelCase: Optional[Any]=None ,__UpperCamelCase: int=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : Any = SpeechTaConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaConfig()
if task == "s2t":
SCREAMING_SNAKE_CASE : Optional[Any] = config.max_text_positions
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaForSpeechToText(__UpperCamelCase )
elif task == "t2s":
SCREAMING_SNAKE_CASE : Optional[Any] = 18_76
SCREAMING_SNAKE_CASE : Optional[int] = 6_00
SCREAMING_SNAKE_CASE : List[Any] = config.max_speech_positions
SCREAMING_SNAKE_CASE : List[str] = SpeechTaForTextToSpeech(__UpperCamelCase )
elif task == "s2s":
SCREAMING_SNAKE_CASE : Optional[int] = 18_76
SCREAMING_SNAKE_CASE : int = config.max_speech_positions
SCREAMING_SNAKE_CASE : Any = SpeechTaForSpeechToSpeech(__UpperCamelCase )
else:
raise ValueError(f"Unknown task name: {task}" )
if vocab_path:
SCREAMING_SNAKE_CASE : Any = SpeechTaTokenizer(__UpperCamelCase ,model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : str = AddedToken('<mask>' ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE : Dict = SpeechTaProcessor(tokenizer=__UpperCamelCase ,feature_extractor=__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.load(__UpperCamelCase )
recursively_load_weights(fairseq_checkpoint['model'] ,__UpperCamelCase ,__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(__UpperCamelCase )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 28 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int = 1_00_00_00 ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = [i - 1 for i in range(limit + 1 )]
    for i in range(2 ,limit + 1 ):
        # phi[i] is still i - 1 only if no smaller prime has reduced it, i.e. i is prime
        if phi[i] == i - 1:
            # sieve step: reduce the totient of every multiple of the prime i
            for j in range(2 * i ,limit + 1 ,i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
    print(solution())
| 28 | 1 |
"""simple docstring"""
import math
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _UpperCamelCase ( _A = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
_UpperCAmelCase = int(_A )
except (TypeError, ValueError):
raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
if nth <= 0:
raise ValueError("""Parameter nth must be greater than or equal to one.""" )
_UpperCAmelCase = []
_UpperCAmelCase = 2
while len(_A ) < nth:
if is_prime(_A ):
primes.append(_A )
num += 1
else:
num += 1
return primes[len(_A ) - 1]
if __name__ == "__main__":
print(F"{solution() = }") | 19 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
return self.image_processor | 19 | 1 |
import torch
from diffusers import StableDiffusionPipeline
_lowerCAmelCase : List[Any] = '''path-to-your-trained-model'''
_lowerCAmelCase : Any = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
_lowerCAmelCase : Tuple = '''A photo of sks dog in a bucket'''
_lowerCAmelCase : List[str] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 454 |
def __snake_case ( _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1000 ) -> int:
    A_ : Optional[int] = 1
    A_ : int = 0
    for divide_by_number in range(_lowerCAmelCase , digit + 1 ):
        A_ : list[int] = []
        A_ : Union[str, Any] = numerator
        for _ in range(1 , digit + 1 ):
            # a repeated remainder means the decimal expansion has entered a cycle
            if now_divide in has_been_divided:
                if longest_list_length < len(_lowerCAmelCase ):
                    A_ : Optional[Any] = len(_lowerCAmelCase )
                    A_ : Union[str, Any] = divide_by_number
            else:
                has_been_divided.append(_lowerCAmelCase )
                # long-division step: multiply the remainder by 10 and divide again
                A_ : Dict = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 454 | 1 |
def _SCREAMING_SNAKE_CASE ( a ) -> list:
    # KMP prefix function: entry i holds the length of the longest proper
    # prefix of the string that is also a suffix ending at position i
    __A : Optional[int] = [0] * len(a )
    for i in range(1 , len(a ) ):
        # use last results for better performance - dynamic programming
        __A : Optional[int] = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            __A : Union[str, Any] = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        __A : Optional[int] = j
    return prefix_result
def _SCREAMING_SNAKE_CASE ( a ) -> int:
    return max(prefix_function(a ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 703 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : Union[str, Any] = ''''''
UpperCAmelCase : Optional[int] = ''''''
UpperCAmelCase : Union[str, Any] = 1 # (0 is vertical, 1 is horizontal)
def _SCREAMING_SNAKE_CASE ( ) -> None:
    __A , __A : List[Any] = get_dataset(a , a )
    print('Processing...' )
    __A , __A , __A : Optional[Any] = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __A : Optional[int] = random_chars(32 )
        __A : Dict = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        __A : Dict = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(F"""/{file_root}.jpg""" , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"""Success {index+1}/{len(a )} with {file_name}""" )
        __A : int = []
        for anno in new_annos[index]:
            __A : Any = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(a )
        with open(F"""/{file_root}.txt""" , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> tuple[list, list]:
    __A : int = []
    __A : List[Any] = []
    for label_file in glob.glob(os.path.join(a , '*.txt' ) ):
        __A : List[str] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(a ) as in_file:
            __A : Tuple = in_file.readlines()
        __A : Dict = os.path.join(a , F"""{label_name}.jpg""" )
        __A : Dict = []
        for obj_list in obj_lists:
            __A : int = obj_list.rstrip('\n' ).split(' ' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(a )
        labels.append(a )
    return img_paths, labels
def _SCREAMING_SNAKE_CASE ( a , a , a = 1 ) -> tuple[list, list, list]:
    __A : int = []
    __A : Optional[Any] = []
    __A : Dict = []
    for idx in range(len(a ) ):
        __A : Dict = []
        __A : Optional[Any] = img_list[idx]
        path_list.append(a )
        __A : Union[str, Any] = anno_list[idx]
        __A : Optional[Any] = cva.imread(a )
        if flip_type == 1:
            __A : Any = cva.flip(a , a )
            for bbox in img_annos:
                # horizontal flip mirrors the x center of each bounding box
                __A : Dict = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            __A : Union[str, Any] = cva.flip(a , a )
            for bbox in img_annos:
                # vertical flip mirrors the y center of each bounding box
                __A : Optional[Any] = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(a )
        new_imgs_list.append(a )
    return new_imgs_list, new_annos_lists, path_list
def _SCREAMING_SNAKE_CASE ( a = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    __A : List[Any] = ascii_lowercase + digits
    return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
    main()
    print('''DONE ✅''')
| 77 | 0 |
def A_ ( A__ ) -> list:
    # Heap's algorithm: generate every permutation of the list, one swap per step
    if len(UpperCamelCase__ ) <= 1:
        return [tuple(UpperCamelCase__ )]
    a__ : List[Any] = []
    def generate(A__ , A__ ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , UpperCamelCase__ )
        for i in range(k - 1 ):
            if k % 2 == 0: # k is even
                a__ , a__ : str = arr[k - 1], arr[i]
            else: # k is odd
                a__ , a__ : Optional[int] = arr[k - 1], arr[0]
            generate(k - 1 , UpperCamelCase__ )
    generate(len(UpperCamelCase__ ) , UpperCamelCase__ )
    return res
if __name__ == "__main__":
    lowercase : int = input("""Enter numbers separated by a comma:\n""").strip()
    lowercase : str = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
| 302 |
import math
from datetime import datetime, timedelta
def _A( UpperCamelCase__ : int ) -> datetime:
    '''simple docstring'''
    __lowercase = year % 19
    __lowercase = year % 4
    __lowercase = year % 7
    __lowercase = math.floor(year / 100 )
    __lowercase = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    __lowercase = leap_day_inhibits / 4
    __lowercase = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    __lowercase = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    __lowercase = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    __lowercase = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(UpperCamelCase__ , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(UpperCamelCase__ , 4 , 18 )
    else:
        return datetime(UpperCamelCase__ , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        UpperCAmelCase__ = "will be" if year > datetime.now().year else "was"
        print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 332 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( __A : int ):
    if not isinstance(__A , __A ):
        raise TypeError('''Input value must be an \'int\' type''' )
    a_ : Tuple = 0
    # shift right until nothing is left; the number of shifts is the bit length
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 702 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)
from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
    a_ : Any = []
    for part_id in partition_order:
        a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(__A ):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
    a_ : Any = Spark(__A )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    a_ : int = spark.range(10 ).repartition(2 )
    a_ : Tuple = [1, 0]
    a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
    a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    a_ : str = spark.range(10 ).repartition(1 )
    a_ : Tuple = SparkExamplesIterable(__A )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(__A ):
        assert row_id == f'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    a_ : str = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        a_ : Union[str, Any] = lambda __A : x.reverse()
        a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
        a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(__A ):
            a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    a_ : List[str] = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
    for i, (row_id, row_dict) in enumerate(__A ):
        a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
    for i, (row_id, row_dict) in enumerate(__A ):
        a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
    a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    a_ : List[Any] = spark.range(1_00 ).repartition(1 )
    a_ : Optional[Any] = Spark(__A )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 0 |