| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
def least_divisible_repunit(divisor: int) -> int:
    '''Return the length of the smallest repunit divisible by divisor, or 0 if none exists.'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_000_000) -> int:
    '''Return the least odd divisor coprime to 10 whose smallest repunit is longer than limit.'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'{solution() = }')
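A quick sanity check for the two functions above (names were reconstructed from the variable reads in the mangled source):
assert least_divisible_repunit(7) == 6    # 111111 == 7 * 15873
assert least_divisible_repunit(41) == 5   # 11111 == 41 * 271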
| 593 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
# TODO Update this
__A = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
"""simple docstring"""
model_type = "esm"
def __init__(
    self,
    vocab_size=None,
    mask_token_id=None,
    pad_token_id=None,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=1026,
    initializer_range=0.02,
    layer_norm_eps=1e-12,
    position_embedding_type="absolute",
    use_cache=True,
    emb_layer_norm_before=None,
    token_dropout=False,
    is_folding_model=False,
    esmfold_config=None,
    vocab_list=None,
    **kwargs,
):
    super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.position_embedding_type = position_embedding_type
    self.use_cache = use_cache
    self.emb_layer_norm_before = emb_layer_norm_before
    self.token_dropout = token_dropout
    self.is_folding_model = is_folding_model
    if is_folding_model:
        if esmfold_config is None:
            logger.info("No esmfold_config supplied for folding model, using default values.")
            esmfold_config = EsmFoldConfig()
        elif isinstance(esmfold_config, dict):
            esmfold_config = EsmFoldConfig(**esmfold_config)
        self.esmfold_config = esmfold_config
        if vocab_list is None:
            logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
            self.vocab_list = get_default_vocab_list()
        else:
            self.vocab_list = vocab_list
    else:
        self.esmfold_config = None
        self.vocab_list = None
    if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
        raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
def to_dict(self):
    output = super().to_dict()
    if isinstance(self.esmfold_config, EsmFoldConfig):
        output["esmfold_config"] = self.esmfold_config.to_dict()
    return output
@dataclass
class EsmFoldConfig:
"""simple docstring"""
esm_type: str = None
fp16_esm: bool = True
use_esm_attn_map: bool = False
esm_ablate_pairwise: bool = False
esm_ablate_sequence: bool = False
esm_input_dropout: float = 0
embed_aa: bool = True
bypass_lm: bool = False
lddt_head_hid_dim: int = 128
trunk: "TrunkConfig" = None
def __post_init__(self):
    if self.trunk is None:
        self.trunk = TrunkConfig()
    elif isinstance(self.trunk, dict):
        self.trunk = TrunkConfig(**self.trunk)
def to_dict(self):
    output = asdict(self)
    output["trunk"] = self.trunk.to_dict()
    return output
@dataclass
class TrunkConfig:
"""simple docstring"""
num_blocks: int = 48
sequence_state_dim: int = 1024
pairwise_state_dim: int = 128
sequence_head_width: int = 32
pairwise_head_width: int = 32
position_bins: int = 32
dropout: float = 0
layer_drop: float = 0
cpu_grad_checkpoint: bool = False
max_recycles: int = 4
chunk_size: Optional[int] = 128
structure_module: "StructureModuleConfig" = None
def __post_init__(self):
    if self.structure_module is None:
        self.structure_module = StructureModuleConfig()
    elif isinstance(self.structure_module, dict):
        self.structure_module = StructureModuleConfig(**self.structure_module)
    if self.max_recycles <= 0:
        raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
    if self.sequence_state_dim % self.sequence_head_width != 0:
        raise ValueError(
            "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
            f" {self.sequence_state_dim} and {self.sequence_head_width}.")
    if self.pairwise_state_dim % self.pairwise_head_width != 0:
        raise ValueError(
            "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
            f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
    sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
    pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
    if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
        raise ValueError(
            "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
            f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
    if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
        raise ValueError(
            "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
            f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
    if self.pairwise_state_dim % 2 != 0:
        raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
    if self.dropout >= 0.4:
        raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
def to_dict(self):
    output = asdict(self)
    output["structure_module"] = self.structure_module.to_dict()
    return output
@dataclass
class StructureModuleConfig:
"""simple docstring"""
sequence_dim: int = 384
pairwise_dim: int = 128
ipa_dim: int = 16
resnet_dim: int = 128
num_heads_ipa: int = 12
num_qk_points: int = 4
num_v_points: int = 8
dropout_rate: float = 0.1
num_blocks: int = 8
num_transition_layers: int = 1
num_resnet_blocks: int = 2
num_angles: int = 7
trans_scale_factor: int = 10
epsilon: float = 1e-8
inf: float = 1e5
def to_dict(self):
    return asdict(self)
def get_default_vocab_list():
'''Return the default ESM-2 vocabulary as a tuple of tokens.'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
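For reference, the public transformers API exposes this configuration class as EsmConfig; a minimal round-trip sketch (the hyperparameter values are illustrative assumptions, roughly matching the smallest ESM-2 checkpoint):
from transformers import EsmConfig
config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
assert config.to_dict()["model_type"] == "esm"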
| 593 | 1 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_A: List[str] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
import pyspark
def generate_fn():
__UpperCAmelCase = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__UpperCAmelCase = df_with_partition_id.select('*' ).where(F'part_id = {partition_id}' ).drop('part_id' )
__UpperCAmelCase = partition_df.collect()
__UpperCAmelCase = 0
for row in rows:
yield F'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
def __init__( self , __A , __A=None , ):
__UpperCAmelCase = df
__UpperCAmelCase = partition_order or range(self.df.rdd.getNumPartitions() )
__UpperCAmelCase = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__A )
return SparkExamplesIterable(self.df , partition_order=__A )
def __lowerCamelCase ( self , __A , __A ):
__UpperCAmelCase = self.split_shard_indices_by_worker(__A , __A )
return SparkExamplesIterable(self.df , partition_order=__A )
@property
def __lowerCamelCase ( self ):
return len(self.partition_order )
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
def __init__( self , __A , __A = None , __A = None , **__A , ):
import pyspark
__UpperCAmelCase = pyspark.sql.SparkSession.builder.getOrCreate()
__UpperCAmelCase = df
__UpperCAmelCase = working_dir
super().__init__(
cache_dir=__A , config_name=str(self.df.semanticHash() ) , **__A , )
def __lowerCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(__A ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__A )
probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__A , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__UpperCAmelCase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def __lowerCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __lowerCamelCase ( self , __A ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __lowerCamelCase ( self , __A ):
import pyspark
def get_arrow_batch_size(it):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__UpperCAmelCase = self.df.count()
__UpperCAmelCase = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__UpperCAmelCase = (
self.df.limit(__A )
.repartition(1 )
.mapInArrow(__A , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__UpperCAmelCase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__UpperCAmelCase = min(__A , int(approx_total_size / max_shard_size ) )
__UpperCAmelCase = self.df.repartition(__A )
def __lowerCamelCase ( self , __A , __A , __A , ):
import pyspark
__UpperCAmelCase = ParquetWriter if file_format == 'parquet' else ArrowWriter
__UpperCAmelCase = os.path.join(self._working_dir , os.path.basename(__A ) ) if self._working_dir else fpath
__UpperCAmelCase = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__UpperCAmelCase = self.config.features
__UpperCAmelCase = self._writer_batch_size
__UpperCAmelCase = self._fs.storage_options
def write_arrow(it):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__UpperCAmelCase = pyspark.TaskContext().taskAttemptId()
first_batch = next(it, None)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__UpperCAmelCase = 0
__UpperCAmelCase = writer_class(
features=__A , path=working_fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , )
__UpperCAmelCase = pa.Table.from_batches([first_batch] )
writer.write_table(__A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__UpperCAmelCase , __UpperCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__UpperCAmelCase = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , writer_batch_size=__A , storage_options=__A , embed_local_files=__A , )
__UpperCAmelCase = pa.Table.from_batches([batch] )
writer.write_table(__A )
if writer._num_bytes > 0:
__UpperCAmelCase , __UpperCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__A ) ):
__UpperCAmelCase = os.path.join(os.path.dirname(__A ) , os.path.basename(__A ) )
shutil.move(__A , __A )
__UpperCAmelCase = (
self.df.mapInArrow(__A , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowerCamelCase ( self , __A , __A = "arrow" , __A = None , __A = None , **__A , ):
self._validate_cache_dir()
__UpperCAmelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__A )
__UpperCAmelCase = not is_remote_filesystem(self._fs )
__UpperCAmelCase = os.path.join if is_local else posixpath.join
__UpperCAmelCase = '-TTTTT-SSSSS-of-NNNNN'
__UpperCAmelCase = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
__UpperCAmelCase = path_join(self._output_dir , __A )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = []
__UpperCAmelCase = []
for task_id, content in self._prepare_split_single(__A , __A , __A ):
(
    num_examples,
    num_bytes,
    num_shards,
    shard_lengths,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__A )
__UpperCAmelCase = total_num_examples
__UpperCAmelCase = total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
__UpperCAmelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__UpperCAmelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__A , __A , __A , ):
rename(
__A , fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , fpath.replace('TTTTT-SSSSS' , f'{global_shard_id:05d}' ).replace('NNNNN' , f'{total_shards:05d}' ) , )
__UpperCAmelCase = []
__UpperCAmelCase = 0
for i in range(len(__A ) ):
__UpperCAmelCase , __UpperCAmelCase = task_id_and_num_shards[i]
for shard_id in range(__A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__A , len(__A ) ).map(lambda __A : _rename_shard(*__A ) ).collect()
else:
# don't use any pattern
__UpperCAmelCase = 0
__UpperCAmelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , fpath.replace(SUFFIX , '' ) , )
def __lowerCamelCase ( self , __A , ):
return SparkExamplesIterable(self.df )
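A minimal sketch of how this builder is reached through the public API, datasets.Dataset.from_spark; the toy DataFrame and the local Spark session are assumptions:
import pyspark
from datasets import Dataset

spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], schema="text string, label int")
ds = Dataset.from_spark(df)
print(ds[0])  # expected: {'text': 'a', 'label': 0}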
| 720 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    '''Merge two binary strings that differ in at most one position; return False otherwise.'''
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    return "".join(list1)
def check(binary: list[str]) -> list[str]:
    '''Repeatedly merge implicants that differ in one bit and collect the prime implicants.'''
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == '$':
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    '''Convert each minterm to a fixed-width binary string.'''
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    '''Return True if the implicant string1 covers the minterm string2.'''
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    '''Select the essential prime implicants from the coverage chart.'''
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    '''Build the prime-implicant coverage chart.'''
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        int(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n').split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
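A non-interactive run of the pipeline above, for the two-variable function with minterms 0-3 (a tautology); the expected values assume the merge-on-success behavior restored in check():
binary = decimal_to_binary(2, [0, 1, 2, 3])        # ['00', '01', '10', '11']
prime_implicants = check(binary)                   # ['__']
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))          # ['__']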
| 617 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    '''Convert a 32-char bit string from big- to little-endian word order.'''
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    '''Format a non-negative int as 8 hex characters in little-endian byte order.'''
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    '''Expand the message to a bit string padded to a multiple of 512 bits, with the MD5 length suffix.'''
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    '''Split the bit string into 512-bit blocks of sixteen 32-bit little-endian words.'''
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    '''Bitwise NOT of a non-negative 32-bit integer.'''
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    '''Add two integers modulo 2**32.'''
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    '''Left-rotate a non-negative 32-bit integer by shift bits.'''
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''Return the 32-character hex MD5 digest of message as bytes.'''
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
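The routine can be cross-checked against the standard library; the expected values are RFC 1321 test vectors, and md5_me is the name used in the reconstruction above:
import hashlib

assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode() == b"d41d8cd98f00b204e9800998ecf8427e"
assert md5_me(b"abc") == hashlib.md5(b"abc").hexdigest().encode() == b"900150983cd24fb0d6963f7d28e17f72"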
| 1 |
def solution(limit: int = 50_000_000) -> int:
    '''Count the numbers below limit expressible as prime^2 + prime^3 + prime^4.'''
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    primes_sorted = sorted(primes)
    for prime1 in primes_sorted:
        square = prime1 * prime1
        for prime2 in primes_sorted:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes_sorted:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(F"""{solution() = }""")
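From the problem statement, exactly four numbers below fifty have this form (28, 33, 47, and 49), which gives a cheap regression check for the function above:
assert solution(50) == 4  # {28, 33, 47, 49}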
| 101 | 0 |
'''Project Euler problem 22: sum of name scores in p022_names.txt.'''
import os
def solution() -> int:
    '''Sum (position in sorted list) * (alphabetical value) over all names.'''
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
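As a worked check from the problem statement: COLIN has alphabetical value 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 once the list is sorted, so its name score is 938 * 53 = 49714.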
| 514 |
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverse the order of the words in a string.
    >>> reverse_words('I love Python')
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 514 | 1 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase = 1 , UpperCAmelCase = None , UpperCAmelCase = 5_0 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[ImagePipelineOutput, Tuple]:
__a = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCAmelCase , )
__a = image.to(self.device )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__a = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__a = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=UpperCAmelCase ), "This is a local test"
| 559 |
from manim import *
class a__ ( Scene ):
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = Rectangle(height=0.5 , width=0.5 )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a = Rectangle(height=0.25 , width=0.25 )
__a = [mem.copy() for i in range(6 )]
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('CPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(4 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('GPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Model' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
__a = []
__a = []
for i, rect in enumerate(UpperCAmelCase ):
__a = fill.copy().set_fill(UpperCAmelCase , opacity=0.8 )
target.move_to(UpperCAmelCase )
model_arr.append(UpperCAmelCase )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase )
__a = [meta_mem.copy() for i in range(6 )]
__a = [meta_mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Disk' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase )
__a = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase ) )
__a = Square(0.3 )
input.set_fill(UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 )
self.play(Write(UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase ) )
self.play(FadeOut(UpperCAmelCase ) )
__a = Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__a = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) )
__a = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase ) , Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__a = AnimationGroup(
FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__a = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__a = a_c
__a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , )
__a = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) )
self.wait()
| 559 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
tokenizer_class = CTRLTokenizer
test_rust_tokenizer = False
test_seq2seq = False
def A_ ( self : Optional[int] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCamelCase__ = dict(zip(A__ , range(len(A__ ) ) ) )
UpperCamelCase__ = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCamelCase__ = {"""unk_token""": """<unk>"""}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A__ ) )
def A_ ( self : Any , **_a : List[str] ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A__ )
def A_ ( self : List[str] , _a : int ):
UpperCamelCase__ = """adapt react readapt apt"""
UpperCamelCase__ = """adapt react readapt apt"""
return input_text, output_text
def A_ ( self : str ):
UpperCamelCase__ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ = """adapt react readapt apt"""
UpperCamelCase__ = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCamelCase__ = tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
| 720 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
'''simple docstring'''
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('''tpu-config''', description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('''Accelerate tpu-config command''', description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'''Config Arguments''', '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''', type=UpperCamelCase__, default=UpperCamelCase__, help='''Path to the config file to use for accelerate.''', )
config_args.add_argument(
'''--tpu_name''', default=UpperCamelCase__, help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''', )
config_args.add_argument(
'''--tpu_zone''', default=UpperCamelCase__, help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''', )
UpperCamelCase__ = parser.add_argument_group('''TPU Arguments''', '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''', action='''store_true''', help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''', )
pod_args.add_argument(
'''--command_file''', default=UpperCamelCase__, help='''The path to the file containing the commands to run on the pod on startup.''', )
pod_args.add_argument(
'''--command''', action='''append''', nargs='''+''', help='''A command to run on the pod. Can be passed multiple times.''', )
pod_args.add_argument(
'''--install_accelerate''', action='''store_true''', help='''Whether to install accelerate on the pod. Defaults to False.''', )
pod_args.add_argument(
'''--accelerate_version''', default='''latest''', help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''', )
pod_args.add_argument(
'''--debug''', action='''store_true''', help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher(args):
'''simple docstring'''
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCamelCase__ = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ), Version ):
UpperCamelCase__ = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file, '''r''' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0], UpperCamelCase__ ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
UpperCamelCase__ = '''; '''.join(UpperCamelCase__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {" ".join(UpperCamelCase__ )}""" )
return
subprocess.run(UpperCamelCase__ )
print('''Successfully setup pod.''' )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
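A dry-run sketch of the parser defined above, using only the flags it declares; the TPU name and zone are placeholder assumptions, and it presumes the remaining mangled assignment targets in the function body are restored to the names they are read back as:
parser = tpu_command_parser()
args = parser.parse_args(
    ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
)
print(args.tpu_name, args.tpu_zone, args.command)  # my-tpu us-central1-a [['echo hello']]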
| 591 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase ="docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
__lowerCamelCase = defaultdict(UpperCamelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
__lowerCamelCase = [key for key, value in counts.items() if value > 1]
__lowerCamelCase = []
for duplicate_key in duplicates:
__lowerCamelCase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(UpperCamelCase__ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc(overwrite=False):
with open(UpperCamelCase__ , encoding='''utf-8''' ) as f:
__lowerCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
__lowerCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowerCamelCase = content[api_idx]['''sections''']
# Then to the model doc
__lowerCamelCase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__lowerCamelCase = api_doc[model_idx]['''sections''']
__lowerCamelCase = [(idx, section) for idx, section in enumerate(UpperCamelCase__ ) if '''sections''' in section]
__lowerCamelCase = False
for idx, modality_doc in modalities_docs:
__lowerCamelCase = modality_doc['''sections''']
__lowerCamelCase = clean_model_doc_toc(UpperCamelCase__ )
if old_modality_doc != new_modality_doc:
__lowerCamelCase = True
if overwrite:
__lowerCamelCase = new_modality_doc
if diff:
if overwrite:
__lowerCamelCase = model_doc
__lowerCamelCase = api_doc
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase =parser.parse_args()
check_model_doc(args.fix_and_overwrite)
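A small illustration of what clean_model_doc_toc does with a duplicated entry (hypothetical input; it assumes the remaining mangled assignment targets in the function body are restored to the names they are read back as):
toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate key with an identical title
]
print(clean_model_doc_toc(toc))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]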
| 546 |
'''simple docstring'''
def solution() -> str:
    '''Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000.'''
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
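From the problem statement, the first ten terms sum to 1^1 + 2^2 + ... + 10^10 = 10405071317, so capping the same loop at ten gives a quick check:
assert str(sum(i**i for i in range(1, 11)))[-10:] == "0405071317"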
| 546 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
def __init__( self : Optional[int] , __A : List[Any] , __A : Dict=13 , __A : Union[str, Any]=7 , __A : Optional[Any]=True , __A : Tuple=True , __A : List[str]=True , __A : List[str]=True , __A : Dict=99 , __A : Any=32 , __A : str=2 , __A : Optional[int]=4 , __A : Tuple=37 , __A : List[str]="gelu" , __A : int=0.1 , __A : Dict=0.1 , __A : List[Any]=512 , __A : List[Any]=16 , __A : List[str]=2 , __A : Union[str, Any]=0.0_2 , __A : Tuple=3 , __A : Optional[Any]=4 , __A : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = 13
lowerCAmelCase__ = 7
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 99
lowerCAmelCase__ = 384
lowerCAmelCase__ = 2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 37
lowerCAmelCase__ = """gelu"""
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 512
lowerCAmelCase__ = 16
lowerCAmelCase__ = 2
lowerCAmelCase__ = 0.0_2
lowerCAmelCase__ = 3
lowerCAmelCase__ = 4
lowerCAmelCase__ = 128
lowerCAmelCase__ = 2
lowerCAmelCase__ = 9
lowerCAmelCase__ = 1
lowerCAmelCase__ = None
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Any , __A : Optional[Any] , __A : Any , __A : str , __A : Dict , __A : str , __A : Dict , __A : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = TFConvBertModel(config=__A )
lowerCAmelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__A )
lowerCAmelCase__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , __A : Any , __A : Tuple , __A : List[Any] , __A : List[Any] , __A : Tuple , __A : Tuple , __A : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase__ = TFConvBertForMaskedLM(config=__A )
lowerCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Any , __A : Tuple , __A : Optional[Any] , __A : Dict , __A : List[Any] , __A : int , __A : Dict , __A : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFConvBertForSequenceClassification(config=__A )
lowerCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , __A : List[Any] , __A : Any , __A : List[str] , __A : Tuple , __A : Any , __A : Dict , __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFConvBertForMultipleChoice(config=__A )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , __A : Optional[int] , __A : List[str] , __A : Tuple , __A : List[str] , __A : List[str] , __A : Optional[int] , __A : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFConvBertForTokenClassification(config=__A )
lowerCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Any , __A : Union[str, Any] , __A : int , __A : List[str] , __A : List[str] , __A : Optional[int] , __A : List[str] , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = TFConvBertForQuestionAnswering(config=__A )
lowerCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase__ = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_pruning = False
test_head_masking = False
test_onnx = False
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
lowerCAmelCase__ = TFConvBertModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
lowerCAmelCase__ = True
if hasattr(__A , """use_cache""" ):
lowerCAmelCase__ = True
lowerCAmelCase__ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCAmelCase__ = getattr(self.model_tester , """key_length""" , __A )
for model_class in self.all_model_classes:
lowerCAmelCase__ = self._prepare_for_class(__A , __A )
lowerCAmelCase__ = model_class(__A )
lowerCAmelCase__ = len(model(__A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A , saved_model=__A )
lowerCAmelCase__ = os.path.join(__A , """saved_model""" , """1""" )
lowerCAmelCase__ = tf.keras.models.load_model(__A )
lowerCAmelCase__ = model(__A )
if self.is_encoder_decoder:
lowerCAmelCase__ = outputs["""encoder_hidden_states"""]
lowerCAmelCase__ = outputs["""encoder_attentions"""]
else:
lowerCAmelCase__ = outputs["""hidden_states"""]
lowerCAmelCase__ = outputs["""attentions"""]
self.assertEqual(len(__A ) , __A )
lowerCAmelCase__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__A ) , __A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
lowerCAmelCase__ = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__A )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
lowerCAmelCase__ = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
lowerCAmelCase__ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCAmelCase__ = getattr(self.model_tester , """key_length""" , __A )
lowerCAmelCase__ = getattr(self.model_tester , """key_length""" , __A )
def check_decoder_attentions_output(__A : Tuple ):
lowerCAmelCase__ = len(__A )
self.assertEqual(out_len % 2 , 0 )
lowerCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__A : Union[str, Any] ):
lowerCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = model_class(__A )
lowerCAmelCase__ = model(self._prepare_for_class(__A , __A ) )
lowerCAmelCase__ = len(__A )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
if self.is_encoder_decoder:
lowerCAmelCase__ = model_class(__A )
lowerCAmelCase__ = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_decoder_attentions_output(__A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(__A )
lowerCAmelCase__ = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(__A )
lowerCAmelCase__ = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) )
self.assertEqual(model.config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 211 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy").images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
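
    # Note added for clarity: `token_indices` are positions in the tokenized
    # prompt (BOS token included), so for "a painting of an elephant with
    # glasses" the indices 5 and 7 used above select "elephant" and "glasses".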
| 211 | 1 |
"""simple docstring"""
import pprint
import requests
A_ = '''https://zenquotes.io/api'''
def UpperCAmelCase__ ():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def UpperCAmelCase__ ():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
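
# Illustrative helper, not part of the original script. The response shape
# ([{"q": quote, "a": author, "h": html}]) is an assumption based on the
# public zenquotes API and may change.
def first_quote_text(api_response: list) -> str:
    """Format the first quote of a zenquotes-style response."""
    first = api_response[0]  # assumed list-of-dicts payload
    return f"{first['q']} -- {first['a']}"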
| 609 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(self, vocab_size=100_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NB: the misspelled kwarg key "use_memorry_efficient_attention" is kept as-is from upstream
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
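
    # Usage sketch (added for illustration, not in the original module): the
    # validation above accepts e.g.
    #   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    # and raises for factors <= 1.0 or unknown scaling types.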
| 609 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 5_1_2, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 3_0_5_2_2, hidden_size: int = 7_6_8, num_hidden_layers: int = 1_2, num_attention_heads: int = 1_2, intermediate_size: int = 3_0_7_2, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 5_1_2, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1E-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 1_4)

    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
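
    # Illustration (added): the dummy inputs mark every second token as global.
    # For a sequence length of 8:
    #   mask = torch.zeros(1, 8, dtype=torch.int64); mask[:, ::2] = 1
    #   -> tensor([[1, 0, 1, 0, 1, 0, 1, 0]])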
| 340 |
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
    import doctest
    doctest.testmod()
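
# Worked example (added for illustration): for the classic grid
#   [[1, 3, 1],
#    [1, 5, 1],
#    [4, 2, 1]]
# min_path_sum returns 7, following the path 1 -> 3 -> 1 -> 1 -> 1.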
| 340 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    '''simple docstring'''
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
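
# Example usage (illustrative; the repository id and file path are hypothetical):
# url = hf_hub_url("username/my_dataset", "data/train.csv", revision="main")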
| 472 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
 | 504 | 0 |
'''simple docstring'''
def solution(pence: int = 2_00) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    assert solution(2_0_0) == 7_3_6_8_2
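
# Spot-check (added for illustration): with the same coin set there are 4 ways
# to make 5 pence ({5}, {2, 2, 1}, {2, 1, 1, 1}, {1, 1, 1, 1, 1}):
# assert solution(5) == 4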
| 532 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name restored by best guess; the original name was obfuscated
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn='gelu-approximate', num_embeds_ada_norm=10_00, norm_type='ada_norm_zero', norm_elementwise_affine=False)
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
        pipe.to('cuda')
        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy')
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('cuda')
        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f'/dit/{word}_512.npy')
            assert np.abs((expected_image - image).max()) < 1e-1
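
    # Note added for clarity: `get_label_ids` maps human-readable ImageNet class
    # names (e.g. "vase", "umbrella") to the integer class labels the
    # class-conditional DiT checkpoints were trained on.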
| 532 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last')
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last')
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last')
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 328 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=2_24, patch_size=16, num_channels=3, num_frames=8, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
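
# Usage sketch (added for illustration):
# config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
# config.num_frames  # -> 16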
| 328 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
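
    # Example invocation (added for illustration; the script name and model
    # identifiers are just examples):
    #   python consolidate_rag_checkpoint.py \
    #       --model_type rag_sequence \
    #       --dest ./rag-checkpoint \
    #       --generator_name_or_path facebook/bart-large-cnn \
    #       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base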
| 705 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
'''simple docstring'''
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["""text_embeds"""].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["""image_embeds"""].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["""text_embeds"""].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["""image_embeds"""].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["""text_embeds"""].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["""image_embeds"""].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1E-5)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, F'''Difference between torch and flax is {diff} (>= {tol}).''')
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1E-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
'''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""", """hf-internal-testing/tiny-random-bert""")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="""vision_model""")
        text_model = TFBertModel(text_config, name="""text_model""")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
'''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""", """hf-internal-testing/tiny-random-roberta""")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="""vision_model""")
        text_model = TFRobertaModel(text_config, name="""text_model""")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
'''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""", """hf-internal-testing/tiny-random-bert""")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="""vision_model""")
        text_model = TFBertModel(text_config, name="""text_model""")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""", logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""")
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""], images=image, padding=True, return_tensors="""np""")
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]))
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1E-3))
| 198 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2_0_4_8, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''')
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            if "attention_mask" in text_encoding:
                text_encoding['''decoder_attention_mask'''] = text_encoding.pop('''attention_mask''')
            if "input_ids" in text_encoding:
                text_encoding['''decoder_input_ids'''] = text_encoding.pop('''input_ids''')
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
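
    # Usage sketch (added for illustration; the checkpoint name is an assumption):
    # processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    # inputs = processor(images=image, text="a short caption", return_tensors="pt")
    # Text inputs come back as decoder_input_ids / decoder_attention_mask,
    # matching the key renaming performed in __call__ above.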
| 184 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCamelCase : List[Any] = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='''utf-8''') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(''' ''')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('''\n''', ''''''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('''O''')
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith('''-DOCSTART-''') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ''' ''' + preds_list[example_id].pop(0) + '''\n'''
                writer.write(output_line)
            else:
                logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''', line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, '''r''') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
if path:
with open(lowercase__ , '''r''' ) as f:
__A =f.read().splitlines()
if "O" not in labels:
__A =['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 184 | 1 |
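# A small standalone illustration (assumed data, not from the original file) of how the
# NER reader above splits one CoNLL-style line into a token and the label in column -1.
line = "Germany B-LOC\n"
splits = line.split(" ")
word = splits[0]
label = splits[-1].replace("\n", "")
assert (word, label) == ("Germany", "B-LOC")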
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 121 | """simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 121 | 1 |
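# Sketch of the embedding-extension trick used by the conversion script above: a new
# special-token row is appended by concatenating a copied (unsqueezed) row onto the
# weight matrix. Shapes here are toy values, not the real MLuke sizes.
import torch

word_emb = torch.randn(4, 8)               # (vocab_size, hidden_size)
new_row = word_emb[2].unsqueeze(0)         # copy row 2 as the init for the new token
word_emb = torch.cat([word_emb, new_row])  # vocab grows by one
assert word_emb.shape == (5, 8)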
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition around a[left_index] (first-element pivot); returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]

    quick_sort_random(arr, 0, len(arr))

    print(arr)


if __name__ == "__main__":
    main() | 45 |
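# Quick sanity check for the sorter above (illustration, not in the original file):
sample = [5, 3, 8, 1, 2]
quick_sort_random(sample, 0, len(sample))
assert sample == sorted([5, 3, 8, 1, 2])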
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions | 45 | 1 |
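# The core of the value guidance above, in isolation: nudge the sample in the direction
# that increases the predicted value. `value_fn` is a stand-in for the real value
# network; everything here is a toy sketch, not the pipeline's actual objective.
import torch

def value_fn(x):
    return -(x - 3.0).pow(2).sum()  # toy value function: peaks at x == 3

x = torch.zeros(4, requires_grad=True)
scale = 0.1
y = value_fn(x)
(grad,) = torch.autograd.grad([y.sum()], [x])
x = x.detach() + scale * grad  # one guidance step, as in run_diffusion above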
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(worflow_run_id, token=None):
    """Get all artifact links from a workflow run."""

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip from its (redirected) download URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that failed with it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path like `tests/models/<model>/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Group the errors per model, keeping per-error counts."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 416 |
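# How the error grouping above behaves, on assumed toy data (not real CI output):
from collections import Counter

logs = [["line1", "AssertionError", "tests/models/bert/test_x.py::t1", None],
        ["line2", "AssertionError", "tests/models/gpt2/test_y.py::t2", None],
        ["line3", "ImportError", "tests/models/bert/test_z.py::t3", None]]
counter = Counter(x[1] for x in logs)
assert counter.most_common(1) == [("AssertionError", 2)]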
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
| 416 | 1 |
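# Round-trip check for the helpers above:
assert base85_decode(base85_encode("Hello World!")) == "Hello World!"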
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 435 | '''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a single 1-D DP row (space-optimized Pascal's triangle)."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
| 435 | 1 |
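# Cross-check of the DP implementation above against the standard library
# (math.comb exists since Python 3.8).
import math
assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252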
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 704 |
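# The gradient-accumulation pattern used in the training loop above, reduced to its
# skeleton: the loss is divided by the number of accumulation steps and the optimizer
# only steps every `gradient_accumulation_steps` batches, so the effective batch size
# stays `batch_size` even when each forward pass sees only `MAX_GPU_BATCH_SIZE` samples.
# `model`, `optimizer`, and `batches` are placeholders for illustration.
def accumulate(model, optimizer, batches, gradient_accumulation_steps):
    for step, batch in enumerate(batches):
        loss = model(batch) / gradient_accumulation_steps
        loss.backward()
        if step % gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()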
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Numerically evaluate Gamma(num) = integral from 0 to inf of x^(num-1) * e^(-x) dx."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 456 | 0 |
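# Spot check for the integral above: Gamma(n) == (n - 1)! for positive integers.
import math
assert abs(gamma(5) - math.factorial(4)) < 1e-6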
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` boolean flags into their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 603 |
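# Illustration of the legacy-flag translation performed in __init__ above (assumed,
# simplified standalone version): a `no_X` keyword is popped and stored as `X = not value`.
kwargs = {"no_cuda": True, "batch_size": 8}
deprecated_arg = "no_cuda"
positive_arg = deprecated_arg[3:]  # "cuda"
translated = {positive_arg: not kwargs.pop(deprecated_arg)}
assert translated == {"cuda": False} and "no_cuda" not in kwargs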
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assume the input already follows the {"image": ..., "question": ...} format
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 650 | 0 |
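# Hedged usage sketch for the pipeline above; the checkpoint name is an assumption
# (any VQA-capable model such as a ViLT checkpoint would do) and is not part of the
# file, so the example is left as comments.
# from transformers import pipeline
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="cats.png", question="How many cats are there?", top_k=2)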
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 709 |
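# Quick demonstration of the helpers above on 0b1010 (decimal 10):
assert set_bit(0b1010, 0) == 0b1011
assert clear_bit(0b1010, 1) == 0b1000
assert flip_bit(0b1010, 3) == 0b0010
assert is_bit_set(0b1010, 3) is True
assert get_bit(0b1010, 0) == 0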
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Solve a*x^2 + b*x + c = 0; non-real roots are returned as complex numbers."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
| 601 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed: v_rms = sqrt(3 * R * T / M), with T in kelvin and M in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: N2 at 300 K; molar mass must be in kg/mol (28 g/mol == 0.028 kg/mol)
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 606 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library),
    using Jieba-based pre-tokenization for Chinese text.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the normalizer if its serialized state disagrees with the requested options.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # PreTokenizer.custom(...) is not picklable, so swap in a plain BertPreTokenizer first.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # The custom Jieba pre-tokenizer cannot be serialized either, so fall back to
        # BertPreTokenizer while writing the tokenizer files to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
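# A minimal usage sketch (assumes the `rjieba` dependency is installed; the expected output
# below is the tokenization shown in the RoFormer documentation):
#
#     from transformers import RoFormerTokenizerFast
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     tokenizer.tokenize("今天天气非常好。")  # ['今', '天', '天', '气', '非常', '好', '。']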
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
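# The subclass below mirrors the question-answering examples: `compute_metrics` is disabled
# during the prediction loop and applied manually only after `post_process_function` has
# turned the raw predictions into final ones, since that mapping cannot run inside the loop.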
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts the number to a string to iterate over its digits, summing their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
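

# e.g. digit_factorial_sum(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145. Starting from 69 instead
# gives the chain 69 -> 363600 -> 1454 -> 169 -> 363601 -> 1454 -> ..., i.e. five distinct terms
# before it cycles, which is exactly the chain length that solution() below counts.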
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
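    # Note: the CSS class names used above ("yuRUbf", "kCrYT") are whatever Google's result
    # page happened to emit when this script was written; Google changes them without notice,
    # so treat the selectors as placeholders that may need updating.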
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
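# Hypothetical invocation (the script name and data paths are placeholders; the remaining
# flags such as --do_predict come from `add_generic_args` in lightning_base):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --gpus 1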
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
    main()
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
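# Hypothetical invocation (the script name and all three paths are placeholders for your own files):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin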
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
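# `default_factory` is needed here because dataclasses forbid mutable defaults such as lists;
# the lambda recreates the default list for every new instance.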
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
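# The *Pep604 variants above exercise the `X | None` union syntax, which only parses on
# Python >= 3.10, hence the version guard.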
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__lowerCAmelCase , required=__lowerCAmelCase )
expected.add_argument('--bar' , type=__lowerCAmelCase , required=__lowerCAmelCase )
expected.add_argument('--baz' , type=__lowerCAmelCase , required=__lowerCAmelCase )
expected.add_argument('--flag' , type=__lowerCAmelCase , default=__lowerCAmelCase , const=__lowerCAmelCase , nargs='?' )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((SCREAMING_SNAKE_CASE_) , ) : int = parser.parse_args_into_dataclasses(__lowerCAmelCase , look_for_args_file=__lowerCAmelCase )
self.assertFalse(example.flag )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__lowerCAmelCase )
expected.add_argument('--baz' , default='toto' , type=__lowerCAmelCase , help='help message' )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__lowerCAmelCase , default=__lowerCAmelCase , const=__lowerCAmelCase , nargs='?' )
expected.add_argument('--baz' , type=__lowerCAmelCase , default=__lowerCAmelCase , const=__lowerCAmelCase , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__lowerCAmelCase , dest='baz' )
expected.add_argument('--opt' , type=__lowerCAmelCase , default=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__lowerCAmelCase )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE_ : Optional[Any] = HfArgumentParser(__lowerCAmelCase )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : str = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : List[str] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase ) )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
SCREAMING_SNAKE_CASE_ : Any = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
SCREAMING_SNAKE_CASE_ : Any = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __A ( self ):
@dataclass
class snake_case_ :
__lowerCamelCase : Literal["titi", "toto", 42] = "toto"
SCREAMING_SNAKE_CASE_ : int = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
SCREAMING_SNAKE_CASE_ : List[str] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
SCREAMING_SNAKE_CASE_ : int = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__lowerCAmelCase )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__lowerCAmelCase )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__lowerCAmelCase )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__lowerCAmelCase )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args([] )
self.assertEqual(
__lowerCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__lowerCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=__lowerCAmelCase , type=__lowerCAmelCase )
expected.add_argument('--bar' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='help message' )
expected.add_argument('--baz' , default=__lowerCAmelCase , type=__lowerCAmelCase )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__lowerCAmelCase )
expected.add_argument('--des' , nargs='+' , default=[] , type=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__lowerCAmelCase )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE_ : Dict = HfArgumentParser(__lowerCAmelCase )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = parser.parse_args([] )
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , bar=__lowerCAmelCase , baz=__lowerCAmelCase , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE_ : Any = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__lowerCAmelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__lowerCAmelCase , required=__lowerCAmelCase )
expected.add_argument('--required_str' , type=__lowerCAmelCase , required=__lowerCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__lowerCAmelCase , )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__lowerCAmelCase , required=__lowerCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__lowerCAmelCase , )
expected.add_argument('--opt' , type=__lowerCAmelCase , default=__lowerCAmelCase )
expected.add_argument('--baz' , default='toto' , type=__lowerCAmelCase , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__lowerCAmelCase )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase )
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
def __A ( self ):
SCREAMING_SNAKE_CASE_ : int = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__lowerCAmelCase , parser.parse_dict , __lowerCAmelCase , allow_extra_keys=__lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(__lowerCAmelCase , 'temp_json' )
os.mkdir(__lowerCAmelCase )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
SCREAMING_SNAKE_CASE_ : Dict = BasicExample(**__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Any = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(__lowerCAmelCase , 'temp_yaml' )
os.mkdir(__lowerCAmelCase )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
SCREAMING_SNAKE_CASE_ : Any = BasicExample(**__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for ViTMSN; attention-pruning, torchscript, embedding-resizing and
    head-masking tests are disabled below since they do not apply to this architecture.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
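# The COCO image above is a fixture that ships with the transformers test suite; any RGB
# image would work for this smoke test.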
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
__snake_case = ""
while len(SCREAMING_SNAKE_CASE ) % 3 != 0:
__snake_case = "0" + bin_string
__snake_case = [
bin_string[index : index + 3]
for index in range(len(SCREAMING_SNAKE_CASE ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__snake_case = 0
for index, val in enumerate(SCREAMING_SNAKE_CASE ):
oct_val += int(2 ** (2 - index) * int(SCREAMING_SNAKE_CASE ) )
oct_string += str(SCREAMING_SNAKE_CASE )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
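# `get_crash_schedule` above converts a uniform grid over [0, 1) into the "crash" noise
# schedule used by the original audio-diffusion checkpoints, expressing each point as a
# diffusion timestep via atan2(sigma, alpha).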
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
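# The *_NUM_TO_LAYER tables above translate the flat numeric sub-module indices of the
# original UNet into diffusers-style block paths; RES_CONV_MAP and ATTN_MAP do the same for
# the parameters inside each resnet/attention block.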
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict
def transform_conv_attns(new_state_dict, name, v):
    if len(name) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices: the original checkpoint stores Q, K and V stacked along dim 0 of a
        # single conv weight; slice that stack into three separate projections.
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(diffusion_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args)
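# Hypothetical invocation (the script filename and output path are placeholders; --model_path
# must be a key of MODELS_MAP or a local .ckpt file):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers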
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)

    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )

    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, device, igf_data_file, data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
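# Roughly, each collected (context, IG(X)) pair records how much training on a single short
# context changes the model's perplexity on the held-out objective set; the secondary learner
# below is then trained to predict that information gain from the context alone.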
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.

                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    test_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(test_perp)

                    print("Test perplexity, step", global_step, ":", test_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpta,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpta,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
| 94 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
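# With both hooks defined, an invocation such as the following (the report id and test
# path are placeholders) registers the shared options and writes the summary reports:
#
#     pytest --make-reports=run_1 tests/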
| 513 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    # Post the message as JSON to the Slack incoming-webhook URL.
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
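    # A common hardening of the call above is to read the webhook URL from the
    # environment instead of committing it to source; the variable name is an assumption:
    #
    #     import os
    #     send_slack_message("<YOUR MESSAGE BODY>", os.environ["SLACK_WEBHOOK_URL"])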
| 670 |
from manim import *
class _UpperCamelCase(Scene):
'''simple docstring'''
    def construct(self):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from the JSON config
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
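# An illustrative invocation of this script (the filename and all paths are placeholders):
#
#     python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./xlnet_model.ckpt \
#         --xlnet_config_file ./xlnet_config.json \
#         --pytorch_dump_folder_path ./xlnet_pytorch \
#         --finetuning_task sts-b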
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
A = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 125 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
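# Every import in this module is wrapped in the same optional-dependency guard; reduced
# to a template it looks like the sketch below, where `is_foo_available` and
# `FooPipeline` are placeholders rather than real symbols:
#
#     try:
#         if not is_foo_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from ...utils.dummy_foo_objects import *  # noqa F403  (import dummy stand-ins)
#     else:
#         from .pipeline_foo import FooPipeline  # import the real implementation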
| 328 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
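# After the `sys.modules[__name__]` swap above, importing a symbol from this package
# only executes the heavy torch-dependent module on first access, e.g. (illustrative):
#
#     from transformers.models.timm_backbone import TimmBackbone  # resolved lazily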
| 47 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : str = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
| 47 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least row length n for which the number of ways of filling the row
    with blocks of at least `min_block_length` first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        # one extra position; start from the count for the empty arrangement
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
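# Sanity check from the problem statement: F(3, 29) = 673135 and F(3, 30) = 1089155,
# so the least n with F(3, n) > 1,000,000 for a minimum block length of 3 is 30:
#
#     assert solution(3) == 30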
if __name__ == "__main__":
print(f'{solution() = }')
| 134 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """
    Returns how often each possible total occurs when rolling `dice_number`
    dice with `sides_number` faces each.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
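# For intuition (illustrative, not part of the solution): with two two-sided dice the
# totals 2, 3 and 4 occur once, twice and once respectively, so
# total_frequency_distribution(sides_number=2, dice_number=2) == [0, 0, 1, 2, 1].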
def solution() -> float:
    """
    Returns the probability that Peter (nine four-sided dice) rolls a strictly
    higher total than Colin (six six-sided dice), rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # count the Colin totals strictly below this Peter total
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
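# The published Project Euler result for this problem, and hence the value that
# solution() should return, is 0.5731441.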
if __name__ == "__main__":
print(F"""{solution() = }""")
| 720 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
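# For a page containing just the words "Hello world", the function above would return
# something of the form (the exact numbers are illustrative):
#
#     (["Hello", "world"], [[82, 41, 165, 69], [171, 41, 260, 69]])
#
# i.e. the word list plus one 0-1000-normalized (left, top, right, bottom) box per word.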
class LayoutLMvaImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean=None,
        image_std=None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 145 | 0 |
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
lowerCAmelCase_ : Optional[Any] = True
from torch.cuda.amp import autocast
lowerCAmelCase_ : Optional[int] = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have different lengths and need different padding
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
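    # Illustrative use of the collator (toy, variable-length features; real
    # `input_values` are much longer float waveforms):
    #
    #     data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    #     batch = data_collator(
    #         [
    #             {"input_values": [0.1, 0.2, 0.3], "labels": [5, 9]},
    #             {"input_values": [0.4], "labels": [7]},
    #         ]
    #     )
    #     # batch["input_values"] is padded to shape (2, 3); padded label positions
    #     # hold -100, so the CTC loss ignores them.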
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
UpperCAmelCase = datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
UpperCAmelCase = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
UpperCAmelCase = F'''[{''.join(data_args.chars_to_ignore )}]'''
def remove_special_characters(lowerCAmelCase ):
UpperCAmelCase = re.sub(lowerCAmelCase , """""" , batch["""sentence"""] ).lower() + """ """
return batch
UpperCAmelCase = train_dataset.map(lowerCAmelCase , remove_columns=["""sentence"""] )
UpperCAmelCase = eval_dataset.map(lowerCAmelCase , remove_columns=["""sentence"""] )
def extract_all_chars(lowerCAmelCase ):
UpperCAmelCase = """ """.join(batch["""text"""] )
UpperCAmelCase = list(set(lowerCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
UpperCAmelCase = train_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , batch_size=-1 , keep_in_memory=lowerCAmelCase , remove_columns=train_dataset.column_names , )
UpperCAmelCase = train_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , batch_size=-1 , keep_in_memory=lowerCAmelCase , remove_columns=eval_dataset.column_names , )
UpperCAmelCase = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
UpperCAmelCase = {v: k for k, v in enumerate(lowerCAmelCase )}
UpperCAmelCase = vocab_dict[""" """]
del vocab_dict[" "]
UpperCAmelCase = len(lowerCAmelCase )
UpperCAmelCase = len(lowerCAmelCase )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(lowerCAmelCase , lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
UpperCAmelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(lowerCAmelCase ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(lowerCAmelCase ) )
if data_args.max_val_samples is not None:
UpperCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) )
UpperCAmelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCAmelCase ):
UpperCAmelCase , UpperCAmelCase = torchaudio.load(batch["""path"""] )
UpperCAmelCase = resampler(lowerCAmelCase ).squeeze().numpy()
UpperCAmelCase = 16000
UpperCAmelCase = batch["""text"""]
return batch
UpperCAmelCase = train_dataset.map(
lowerCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
UpperCAmelCase = eval_dataset.map(
lowerCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(lowerCAmelCase ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
UpperCAmelCase = processor(
audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] )
batch.update(lowerCAmelCase )
return batch
UpperCAmelCase = train_dataset.map(
lowerCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
UpperCAmelCase = eval_dataset.map(
lowerCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
UpperCAmelCase = datasets.load_metric("""wer""" )
def compute_metrics(lowerCAmelCase ):
UpperCAmelCase = pred.predictions
UpperCAmelCase = np.argmax(lowerCAmelCase , axis=-1 )
UpperCAmelCase = processor.tokenizer.pad_token_id
UpperCAmelCase = processor.batch_decode(lowerCAmelCase )
# we do not want to group tokens when computing the metrics
UpperCAmelCase = processor.batch_decode(pred.label_ids , group_tokens=lowerCAmelCase )
UpperCAmelCase = wer_metric.compute(predictions=lowerCAmelCase , references=lowerCAmelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
UpperCAmelCase = DataCollatorCTCWithPadding(processor=lowerCAmelCase , padding=lowerCAmelCase )
# Initialize our Trainer
UpperCAmelCase = CTCTrainer(
model=lowerCAmelCase , data_collator=lowerCAmelCase , args=lowerCAmelCase , compute_metrics=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
UpperCAmelCase = model_args.model_name_or_path
else:
UpperCAmelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
UpperCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase )
trainer.save_model()
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase )
)
UpperCAmelCase = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics("""train""" , lowerCAmelCase )
trainer.save_metrics("""train""" , lowerCAmelCase )
trainer.save_state()
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCAmelCase )
UpperCAmelCase = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics("""eval""" , lowerCAmelCase )
trainer.save_metrics("""eval""" , lowerCAmelCase )
return results
if __name__ == "__main__":
main()
| 673 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing (SAMPLE_VOCAB is the fixture
        # path defined at the top of this test file)
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1], )
| 54 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
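    # Convergence sketch: the Monte Carlo error shrinks roughly like 1/sqrt(n),
    # so 100x more samples buys about 10x accuracy. Illustrative runs:
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)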
| 54 | 1 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention of layer i. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
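# Shape sketch (assuming the usual T5X layout): attention kernels are stored as
# (d_model, num_layers, num_heads, d_head), so slicing layer i and flattening the
# two head axes yields the 2-D (d_model, num_heads * d_head) matrix that the HF
# checkpoint expects, e.g. (512, 8, 64) -> (512, 512) for a small model; the
# `out` kernel keeps its head axes in front, hence the different reshape for `o`.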
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of layer i. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of layer i."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
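# Example invocation (sketch; paths are placeholders, the script name is whatever
# this file is saved as):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention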
| 597 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = "cpu"
SCREAMING_SNAKE_CASE_ = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
SCREAMING_SNAKE_CASE_ = "path-to-your-trained-model"
SCREAMING_SNAKE_CASE_ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
SCREAMING_SNAKE_CASE_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE_ = pipe.to(device)
# to channels last
SCREAMING_SNAKE_CASE_ = pipe.unet.to(memory_format=torch.channels_last)
SCREAMING_SNAKE_CASE_ = pipe.vae.to(memory_format=torch.channels_last)
SCREAMING_SNAKE_CASE_ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
SCREAMING_SNAKE_CASE_ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
SCREAMING_SNAKE_CASE_ = torch.randn(2, 4, 6_4, 6_4)
SCREAMING_SNAKE_CASE_ = torch.rand(1) * 9_9_9
SCREAMING_SNAKE_CASE_ = torch.randn(2, 7_7, 7_6_8)
SCREAMING_SNAKE_CASE_ = (sample, timestep, encoder_hidden_status)
try:
SCREAMING_SNAKE_CASE_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
SCREAMING_SNAKE_CASE_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
SCREAMING_SNAKE_CASE_ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
SCREAMING_SNAKE_CASE_ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
SCREAMING_SNAKE_CASE_ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
SCREAMING_SNAKE_CASE_ = 6_6_6
SCREAMING_SNAKE_CASE_ = torch.Generator(device).manual_seed(seed)
SCREAMING_SNAKE_CASE_ = {"generator": generator}
if args.steps is not None:
SCREAMING_SNAKE_CASE_ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
SCREAMING_SNAKE_CASE_ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 597 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
a__ : int = True
from torch.cuda.amp import autocast
a__ : str = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that will dynamically pad the inputs received."""
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
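# Masking sketch: label positions where attention_mask == 0 are set to -100 above,
# and the CTC loss ignores label ids < 0, so padded slots never contribute, e.g.
#   padded ids [12, 7, 0, 0] with mask [1, 1, 0, 0]  ->  labels [12, 7, -100, -100]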
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , A__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")
# Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names,
    )
    vocab_test = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names,
    )
    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)
    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers,
    )
# Metric
    wer_metric = datasets.load_metric("wer")
    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
# Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
return results
if __name__ == "__main__":
main()
| 710 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
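# Minimal usage sketch (the checkpoint name comes from the pretrained map above;
# the protein string is illustrative):
#
#     tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     ids = tok("MKTAYIAK")["input_ids"]  # framed as <cls> ... <eos>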
| 642 | 0 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
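    # Sanity sketch: this is the classic 9-node example graph, whose minimum
    # spanning tree has total weight 37 (the sum of the expected edge costs).
    assert sum(cost for _, _, cost in expected) == 37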
| 83 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
SCREAMING_SNAKE_CASE : Optional[int] = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
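# Usage sketch (names illustrative): round-tripping the slow SentencePiece vocab
# back out of the fast tokenizer, which only works when the original spiece.model
# is available (can_save_slow_tokenizer == True):
#
#     tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     tok.save_vocabulary("/tmp/albert")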
| 197 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """Constructs an XLNet tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token='<s>',
        eos_token='</s>',
        unk_token='<unk>',
        sep_token='<sep>',
        pad_token='<pad>',
        cls_token='<cls>',
        mask_token='<mask>',
        additional_special_tokens=['<eop>', '<eod>'],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
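# Layout sketch: unlike BERT, XLNet appends <sep> <cls> at the *end* of the
# sequence, so for a single sequence of n tokens the methods above produce
#   input ids:       tokens + [sep_id] + [cls_id]
#   token type ids:  [0] * (n + 1) + [2]   # the trailing 2 is the CLS segment id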
| 421 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET', 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
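# Sketch of reusing the simulator to guard arbitrary network-touching code in a
# test (`my_loader` is a hypothetical function, shown for illustration only):
#
#     def test_my_loader_is_offline_safe():
#         with offline(OfflineSimulationMode.CONNECTION_FAILS):
#             with pytest.raises(requests.exceptions.ConnectionError):
#                 my_loader()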
| 421 | 1 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
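    # Float inputs also work, since the bucket index is int(i - min_value);
    # a small illustrative check:
    assert bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]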
| 600 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class TFTopKTopPFilteringTest(unittest.TestCase):
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
                ],  # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
        expected_non_filtered_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.intaa, )  # expected non filtered idx as noted above
        expected_non_filtered_values = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023], dtype=tf.floataa, )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.floataa))), dtype=tf.intaa, )
        tf.debugging.assert_near(non_inf_output, expected_non_filtered_values, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, expected_non_filtered_idx)
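        # Semantics sketch: tf_top_k_top_p_filtering keeps the top_k highest
        # logits, then additionally drops tokens outside the smallest set whose
        # cumulative softmax mass exceeds top_p (never keeping fewer than
        # min_tokens_to_keep); every filtered position is set to -inf, which is
        # exactly what the assertions above verify.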
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
        framework_dependent_parameters = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.intaa, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.intaa, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [1_02, 1_03]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 600 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist()) | 702 |
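The speaker-embeddings test above hinges on one NumPy detail: a dict of arrays written with np.savez round-trips through a .npz file key by key. A small standalone sketch of that round-trip, with the same (illustrative) preset shapes:

import os
import tempfile

import numpy as np

# A Bark-style voice preset is just a dict of arrays; np.savez stores it as a .npz archive.
voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), "preset.npz")
np.savez(path, **voice_preset)
reloaded = np.load(path)
for key in voice_preset:
    assert np.array_equal(voice_preset[key], reloaded[key])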
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_KWARGS_DESCRIPTION = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, predictions, references):
        # fraction of predictions judged mathematically equivalent to their reference
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
} | 599 | 0 |
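The metric's core is a canonicalize-then-compare accuracy loop. The sketch below shows the same logic in plain Python, with exact string matching standing in for `math_equivalence.is_equiv` (an assumption made only so the example runs without the external dependency):

def sketch_accuracy(predictions, references, is_equiv=lambda a, b: a.strip() == b.strip()):
    # Plain-Python version of the accuracy loop above; swap in a real
    # canonicalizing comparison for actual MATH-style scoring.
    n_correct = sum(1.0 if is_equiv(p, r) else 0.0 for p, r in zip(predictions, references))
    return {"accuracy": n_correct / len(predictions)}

print(sketch_accuracy(["1/2", "3"], ["1/2", "4"]))  # {'accuracy': 0.5}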
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
        )
@slow
    def test_tokenizer_integration(self):
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/speecht5_asr', revision='c5ef64c71905caeccde0e4462ef3f9077224c524', sequences=sequences, )
| 120 |
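The tokenization assertions above lean on SentencePiece's "▁" (SPIECE_UNDERLINE) convention: the marker prefixes the first piece of each word, so detokenization reduces to concatenation plus replacing the marker with a space. A minimal self-contained sketch of that convention (not the tokenizer's actual decode path):

SPIECE_UNDERLINE = "\u2581"  # "▁"

def naive_detokenize(tokens):
    # Concatenate pieces, then turn every word-boundary marker back into a space.
    return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()

tokens = [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"]
assert naive_detokenize(tokens) == "This is a test"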
def catalan_number(number: int) -> int:
    """
    Return the ``number``-th element (1-indexed) of the Catalan sequence 1, 1, 2, 5, 14, ...

    >>> catalan_number(1)
    1
    >>> catalan_number(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
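As a sanity check, the closed-form product above can be compared against the standard Catalan recurrence C_n = C_{n-1} * 2(2n-1)/(n+1). The sketch below uses the same 1-indexed convention as catalan_number; the division is exact because each intermediate value is k times a Catalan number:

def catalan_by_recurrence(number: int) -> int:
    # Same 1-indexed convention as catalan_number above: 1, 1, 2, 5, 14, ...
    current = 1
    for k in range(2, number + 1):
        current = current * 2 * (2 * k - 3) // k  # exact integer division
    return current

assert all(catalan_number(i) == catalan_by_recurrence(i) for i in range(1, 20))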
| 500 | 0 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 269 |
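The attention-weight reshaping in the VAE conversion above rests on one shape trick: an HF linear projection of shape (out_features, in_features) holds exactly the same values as the 1x1 conv kernel of shape (out, in, 1, 1) that the original SD layout expects. A minimal check of what reshape_weight_for_sd does (the 320-dim size is just an illustrative choice):

import torch

w_linear = torch.randn(320, 320)  # HF attention projection weight (out_features, in_features)
w_conv = w_linear.reshape(*w_linear.shape, 1, 1)  # SD stores the same weights as a 1x1 conv kernel
assert w_conv.shape == (320, 320, 1, 1)
assert torch.equal(w_conv[..., 0, 0], w_linear)  # values are untouched; only the view changes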
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 269 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 400 |
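ROUGE-Lsum scores hypotheses sentence by sentence, so both hypotheses and references must carry one sentence per line; the helper above produces exactly that layout. An illustrative call (assuming nltk's punkt data is available, which the module-level download above ensures):

text = "Hugging Face is based in New York. It also has an office in Paris."
print(add_newline_to_end_of_each_sentence(text))
# Hugging Face is based in New York.
# It also has an office in Paris.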
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class TaConfig(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class TaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 400 | 1 |
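The activation parsing in the config's __init__ is worth seeing in isolation: "gated-gelu" splits into a gating flag plus an activation name, which is then remapped to "gelu_new" for backward compatibility with T5 v1.1 checkpoints. A standalone sketch of just that logic:

def parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # historical alias kept for backward compatibility
    return dense_act_fn, is_gated_act

assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)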
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def A_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def A_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def A_ ( self ):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 700 |
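The shape assertions in the tests above encode ConvNext's overall stride: a 4x patchify stem followed by three 2x downsampling stages, so hidden states sit at 1/4, 1/8, 1/16, and 1/32 of the input resolution. The arithmetic in miniature, using the test's 32-pixel inputs:

image_size = 32
stem_stride, num_downsamples = 4, 3
resolutions = [image_size // (stem_stride * 2**i) for i in range(num_downsamples + 1)]
print(resolutions)  # [8, 4, 2, 1] -> matches image_size // 4 ... image_size // 32 in the tests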
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    '''simple docstring'''
    # absolute path
    text_file_abs = str(Path(text_file ).resolve() )
    assert cached_path(text_file_abs ) == text_file
    # relative path
    text_file_rel = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file_rel ) == text_file
def test_cached_path_missing_local( tmp_path ):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    '''simple docstring'''
    output_file = get_from_cache(F"tmp://{tmpfs_file}" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , __magic_name__ )
def lowercase ( ):
'''simple docstring'''
with pytest.raises(__magic_name__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 609 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
lowerCamelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
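# Example invocation (illustrative only; the script name and checkpoint paths are
# placeholders, not taken from the original file):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-converted \
#       --finetuning_task sst-2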
| 82 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase_ ( OnnxConfig ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
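# Illustrative sketch (not part of the original module): instantiating the config and
# inspecting the dynamic ONNX input axes declared above.
if __name__ == "__main__":
    vit_config = lowercase__()
    print(vit_config.model_type , vit_config.hidden_size , vit_config.num_hidden_layers )
    vit_onnx_config = lowercase_(vit_config )
    print(dict(vit_onnx_config.inputs ) )  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}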
| 82 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCamelCase( FeatureExtractionMixin ):
    def __init__( self : Dict , feature_size : int , sampling_rate : int , padding_value : float , **kwargs ):
        '''simple docstring'''
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop('padding_side' , 'right' )
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )
        super().__init__(**kwargs )
    def pad( self , processed_features : Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , padding : Union[bool, str, PaddingStrategy] = True , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , ):
        '''simple docstring'''
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                f''' to this method that includes {self.model_input_names[0]}, but you provided'''
                f''' {list(processed_features.keys() )}''' )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element ):
                return_tensors = 'pt'
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = 'np'
            else:
                raise ValueError(
                    f'''type of {first_element} unknown: {type(first_element )}. '''
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('Some items in the output dictionary have a different batch size than others.' )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
        '''simple docstring'''
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['attention_mask'] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return processed_features
    def _truncate( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , pad_to_multiple_of : Optional[int] = None , truncation : Optional[bool] = None , ):
        '''simple docstring'''
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
        return processed_features
    def _get_padding_strategies( self , padding=False , max_length : int=None ):
        '''simple docstring'''
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
                ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
        return padding_strategy
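# Illustrative sketch (not part of the original module): padding two variable-length
# feature sequences with a minimal concrete subclass. The subclass name is hypothetical.
if __name__ == "__main__":
    class _ToyExtractor(_UpperCamelCase ):
        model_input_names = ["input_values"]

    extractor = _ToyExtractor(feature_size=1 , sampling_rate=16000 , padding_value=0.0 )
    batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]} , padding="longest" , return_tensors="np" )
    print(batch["input_values"].shape )  # (2, 3): the shorter sequence is right-padded with 0.0
    print(batch["attention_mask"] )      # [[1 1 1] [1 1 0]]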
| 711 |
import torch
from transformers import AutoModel
class _UpperCamelCase( torch.nn.Module ):
    def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
        '''simple docstring'''
        super(_UpperCamelCase , self ).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        '''simple docstring'''
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        '''simple docstring'''
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q_rep , S_rep , T=1 ):
        '''simple docstring'''
        return self.softmax(T * self.cos(q_rep , S_rep ) )
    def forward( self , W_query , W_supports ):
        '''simple docstring'''
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
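# Illustrative sketch (not from the original file): the cosine-attention helper on
# random stand-in tensors shaped like (batch, supports, tokens, hidden).
if __name__ == "__main__":
    q_rep = torch.rand(2 , 3 , 7 , 768 )
    s_rep = torch.rand(2 , 3 , 7 , 768 )
    attn = torch.nn.Softmax(dim=1 )(1 * torch.nn.CosineSimilarity(3 , 1e-08 )(q_rep , s_rep ) )
    print(attn.shape )  # torch.Size([2, 3, 7])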
| 577 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 386 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    lowercase = GPTSw3Tokenizer
lowercase = False
lowercase = True
lowercase = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = """This is a test"""
        output_text = """This is a test"""
        return input_text, output_text
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 2_0_0_0 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        # fmt: off
        self.assertListEqual(
            tokens , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
        # fmt: on
    def test_fast_encode_decode( self ):
        """simple docstring"""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        texts = ["""This is a test""", """I was born in 92000, and this is falsé."""]
        expected_ids_list = [
            [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
            [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
        sequences = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
UpperCamelCase = {"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=sequences , )  # UpperCamelCase holds the expected_encoding dict bound above
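# Illustrative usage sketch (not part of the test class above); it requires the
# sentencepiece fixture referenced by SAMPLE_VOCAB.
if __name__ == "__main__":
    tok = GPTSw3Tokenizer(SAMPLE_VOCAB )
    ids = tok.encode_fast("This is a test" )
    print(ids , tok.decode_fast(ids ) )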
| 386 | 1 |
"""simple docstring"""
def jaro_winkler( str1 : str , str2 : str ) -> float:
    '''simple docstring'''
    def get_matched_characters(_str1 : str , _str2 : str ) -> str:
        matched = []
        limit = min(len(_str1 ) , len(_str2 ) ) // 2
        for i, l in enumerate(_str1 ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_str2 ) ) )
            if l in _str2[left:right]:
                matched.append(l )
                _str2 = F'''{_str2[0:_str2.index(l )]} {_str2[_str2.index(l ) + 1:]}'''
        return "".join(matched )
    # matching characters
    matching_1 = get_matched_characters(str1 , str2 )
    matching_2 = get_matched_characters(str2 , str1 )
    match_count = len(matching_1 )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1 , matching_2 ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1 )
                + match_count / len(str2 )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str1[:4] , str2[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
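# A further illustrative check (not in the original file): the classic textbook pair
# "martha"/"marhta", whose Jaro-Winkler similarity is commonly cited as ~0.9611.
if __name__ == "__main__":
    print(round(jaro_winkler('''martha''', '''marhta'''), 4))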
| 505 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
a = logging.get_logger(__name__)
class lowercase_ ( ProcessorMixin ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = '''AutoTokenizer'''
UpperCAmelCase : Optional[Any] = ['''tokenizer''']
UpperCAmelCase : List[str] = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained( self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub=False , **kwargs , ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , 'v2' ) , exist_ok=True )
            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] , speaker_embeddings_directory , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , F'''{prompt_key}_{key}.npy''' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , 'w' ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset( self , voice_preset=None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset=None ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding='max_length' , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text
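# Illustrative usage sketch (not part of the module): loading the processor from a hub
# checkpoint and tokenizing a prompt with a named voice preset. The repo id and preset
# name are assumptions, not taken from this file.
if __name__ == "__main__":
    processor = lowercase_.from_pretrained("suno/bark-small" )
    inputs = processor("Hello, my dog is cute" , voice_preset="v2/en_speaker_6" )
    print(inputs["input_ids"].shape )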
| 505 | 1 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT ='\nimport os\n'
IMPORT_IN_FUNCTION ='\ndef foo():\n import os\n return False\n'
DEEPLY_NESTED_IMPORT ='\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
TOP_LEVEL_TRY_IMPORT ='\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION ='\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT ='\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
EXCEPT_AS_IMPORT ='\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
GENERIC_EXCEPT_IMPORT ='\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
MULTILINE_TRY_IMPORT ='\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
MULTILINE_BOTH_IMPORT ='\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
CASES =[
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , __lowerCAmelCase )
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowerCamelCase__ =os.path.join(__lowerCAmelCase , "test_file.py" )
with open(__lowerCAmelCase , "w" ) as _tmp_file:
_tmp_file.write(__lowerCAmelCase )
lowerCamelCase__ =get_imports(__lowerCAmelCase )
assert parsed_imports == ["os"]
| 530 | """simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
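# Illustrative usage sketch (not in the original module): read an integer setting from
# the first matching environment variable. The variable names are placeholders.
if __name__ == "__main__":
    os.environ["MY_WORLD_SIZE"] = "4"
    print(get_int_from_env(["MY_WORLD_SIZE", "WORLD_SIZE"] , 1 ) )  # -> 4
    print(parse_flag_from_env("MY_DEBUG_FLAG" , default=False ) )  # -> False when unset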
| 530 | 1 |
def is_balanced( s ):
    stack = []
    open_brackets = set({'(', '[', '{'} )
    closed_brackets = set({')', ']', '}'} )
    open_to_closed = {'{': '}', '[': ']', '(': ')'}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
def main():
    s = input('Enter sequence of brackets: ' )
    if is_balanced(s ):
        print(s , 'is balanced' )
    else:
        print(s , 'is not balanced' )
if __name__ == "__main__":
main()
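# Illustrative checks (not in the original file): one balanced and one crossed pair.
if __name__ == "__main__":
    print(is_balanced('([]{})' ) )  # True
    print(is_balanced('([)]' ) )    # False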
| 701 |
_snake_case : List[Any] = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
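# Illustrative sketch (not part of the original __init__): downstream code can probe
# the same availability flags before importing heavyweight pipelines.
def _example_available_features():
    features = []
    if is_torch_available():
        features.append("torch pipelines")
    if is_flax_available():
        features.append("flax pipelines")
    if is_onnx_available():
        features.append("onnx runtime models")
    return features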
| 421 | 0 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase :int = logging.get_logger(__name__)
class _lowerCamelCase :
'''simple docstring'''
    name = 42
    pip_package = None
    @staticmethod
    def is_available( ):
        raise NotImplementedError
    def run( self , trainer , n_trials , direction , **kwargs ):
        raise NotImplementedError
    def default_hp_space( self , trial ):
        raise NotImplementedError
    def ensure_available( self ):
        if not self.is_available():
            raise RuntimeError(
                F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
    @classmethod
    def pip_install( cls ):
        return F'`pip install {cls.pip_package or cls.name}`'
class _lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A_ : Tuple = """optuna"""
@staticmethod
def __lowerCAmelCase ( ) -> List[str]:
return is_optuna_available()
def __lowerCAmelCase ( self : Optional[int] , _A : Any , _A : int , _A : str , **_A : Optional[int] ) -> str:
return run_hp_search_optuna(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : Dict , _A : Dict ) -> str:
return default_hp_space_optuna(_A )
class _lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A_ : int = """ray"""
A_ : Optional[int] = """\'ray[tune]\'"""
@staticmethod
def __lowerCAmelCase ( ) -> Optional[Any]:
return is_ray_available()
def __lowerCAmelCase ( self : Dict , _A : Optional[Any] , _A : int , _A : str , **_A : Tuple ) -> Optional[Any]:
return run_hp_search_ray(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : Tuple , _A : Optional[Any] ) -> Optional[Any]:
return default_hp_space_ray(_A )
class _lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A_ : Dict = """sigopt"""
@staticmethod
def __lowerCAmelCase ( ) -> str:
return is_sigopt_available()
def __lowerCAmelCase ( self : int , _A : Tuple , _A : int , _A : str , **_A : Optional[Any] ) -> List[Any]:
return run_hp_search_sigopt(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : str , _A : List[Any] ) -> Optional[int]:
return default_hp_space_sigopt(_A )
class _lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A_ : Any = """wandb"""
@staticmethod
def __lowerCAmelCase ( ) -> Tuple:
return is_wandb_available()
def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int , _A : str , **_A : Dict ) -> List[Any]:
return run_hp_search_wandb(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : Dict , _A : Dict ) -> str:
return default_hp_space_wandb(_A )
lowerCAmelCase :Optional[int] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase ( ):
    """simple docstring"""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'{len(available_backends )} hyperparameter search backends available. Using {name} as the default.' )
        return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
f' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) ) | 561 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A : Tuple = "pt"
elif is_tf_available():
A : Optional[int] = "tf"
else:
A : Optional[Any] = "jax"
class lowerCamelCase (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    lowerCamelCase__ = ByT5Tokenizer
lowerCamelCase__ = False
    def setUp( self ):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self ):
        return ByT5Tokenizer.from_pretrained("google/byt5-small" )
    def get_tokenizer( self , **kwargs ) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                continue
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r"^[ a-zA-Z]+$" , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def test_eos_treatment( self ):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""] )
        self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )
    def test_multibytes_char( self ):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "Unicode €.</s>" )
        encoded = tokenizer("e è é ê ë" )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "e è é ê ë</s>" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )
    def test_prepare_batch_integration( self ):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    def test_empty_target_text( self ):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , batch )
        self.assertIn("attention_mask" , batch )
        self.assertNotIn("decoder_input_ids" , batch )
        self.assertNotIn("decoder_attention_mask" , batch )
    def test_max_length_integration( self ):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding="max_length" , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(32 , targets["input_ids"].shape[1] )
    def test_eos_in_input( self ):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text , text_target=tgt_text )
        self.assertEqual(expected_src_tokens , batch["input_ids"][0] )
        self.assertEqual(expected_tgt_tokens , batch["labels"][0] )
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 140 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__lowerCamelCase : Dict = """facebook/wmt19-en-de"""
__lowerCamelCase : Tuple = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
__lowerCamelCase : Dict = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
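
# Sanity check (editor's addition, not in the original script): the halved checkpoint
# should load back from disk and still run a forward pass.
reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny).float()
print("reloaded logits shape:", reloaded(**tokenizer(["Still tiny"], return_tensors="pt")).logits.shape)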
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 448 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert a speed between km/h, m/s, mph and knots, rounded to 3 decimals."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'unit_from' or 'unit_to' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
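    # A few illustrative conversions (editor's addition; values follow directly from
    # the conversion tables above, rounded to 3 decimals by convert_speed):
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "km/h", "mph"))  # 62.137
    print(convert_speed(1, "knot", "km/h"))  # 1.852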
| 448 | 1 |
"""simple docstring"""
class __UpperCAmelCase( __lowercase ):
"""simple docstring"""
pass
class __UpperCAmelCase( __lowercase ):
"""simple docstring"""
pass

class FixedPriorityQueue:
    """A queue with three fixed priority levels; priority 0 is served first."""

    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))

class ElementPriorityQueue:
    """A queue where the smallest element has the highest priority."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)

def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())

def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
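

def heap_priority_queue_sketch() -> None:
    # Editor's sketch (not part of the original lesson): ElementPriorityQueue pays an
    # O(n) min()-scan per dequeue; the standard-library heap gives the same
    # smallest-element-first behaviour in O(log n) per push/pop.
    import heapq

    heap = []
    for item in (10, 70, 100, 1, 5, 7, 4, 64, 128):
        heapq.heappush(heap, item)
    print([heapq.heappop(heap) for _ in range(len(heap))])  # ascending order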
| 218 |
"""A Bezier curve built from control points via Bernstein basis polynomials."""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
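    # Editor's spot-check (not in the original file): for the degree-1 curve through
    # (1, 2) and (3, 5), t = 0.5 must land on the segment midpoint.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)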
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 133 | 0 |
"""Training utilities: a seeding helper and an exponential moving average (EMA) weights wrapper."""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
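

# Typical usage (editor's sketch, not part of this module; all names below are
# illustrative placeholders, shown as comments since this file is a library module):
#
#     ema = EMAModel(model.parameters(), decay=0.9999)
#     for batch in dataloader:
#         loss = model(batch).loss
#         loss.backward()
#         optimizer.step()
#         optimizer.zero_grad()
#         ema.step(model.parameters())      # update the shadow weights
#     ema.store(model.parameters())         # stash the raw weights
#     ema.copy_to(model.parameters())       # evaluate with the averaged weights
#     evaluate(model)
#     ema.restore(model.parameters())       # put the raw weights back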
| 712 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 326 | 0 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
a__ = int(input('''Enter number: ''').strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 279 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
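

# Illustrative lookups (editor's addition; shown as comments since this library
# module is imported, not executed, directly):
#   get_format_type_from_alias("np")  # -> "numpy"
#   get_formatter("numpy")            # -> a NumpyFormatter instance
#   get_formatter("torch")            # -> TorchFormatter, or raises the stored
#                                     #    import error if PyTorch is unavailable
#   get_formatter("unknown")          # -> raises ValueError listing the known types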
| 279 | 1 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 565 |
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Project Euler 34: sum all numbers equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1  # 9999999 has a digit-factorial sum below this bound
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 565 | 1 |
from __future__ import annotations
def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
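    # Editor's addition: quick checks for the odd- and even-length cases.
    assert median_of_two_arrays([1, 3], [2]) == 2
    assert median_of_two_arrays([1, 2], [3, 4]) == 2.5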
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 250 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 250 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
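

# Editor's sketch (not part of the module): the ONNX config above is what tells the
# exporter which input axes are dynamic, e.g.
#   dict(BertOnnxConfig(BertConfig()).inputs)["input_ids"]  # -> {0: "batch", 1: "sequence"}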
| 279 | 0 |
"""A quicksort that picks a random pivot to avoid worst-case inputs."""
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
| 676 | 0 |def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard similarity: |A intersect B| / |A union B|. With alternative_union,
    the denominator is |A| + |B| instead."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
_lowercase : str ={'''a''', '''b''', '''c''', '''d''', '''e'''}
_lowercase : List[Any] ={'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
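    # Editor's addition: the sets share {"c", "d", "e"} and their union has 8
    # elements, so the line above prints 0.375; with the alternative union the
    # denominator becomes |A| + |B| = 11.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11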
| 305 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the sum of the maximum-sum contiguous subarray."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase =[-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 255 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Dict:
A = torch.nn.Linear(1_0 ,1_0 )
A = torch.optim.SGD(model.parameters() ,0.1 )
A = Accelerator()
A = accelerator.prepare(lowerCamelCase_ )
try:
pickle.loads(pickle.dumps(lowerCamelCase_ ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 255 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
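

# Editor's note (not part of the module): `attribute_map` makes the generic config
# names alias the T5-style ones, e.g.
#   GPTSanJapaneseConfig().hidden_size  # -> 1024, read through the "d_model" field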
| 21 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
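# Example invocation (a sketch; the script filename is assumed). fire maps the
# function's keyword arguments to CLI flags:
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en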
| 21 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 712 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 0 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
SCREAMING_SNAKE_CASE_ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case )
UpperCamelCase = TestCommand(*__snake_case )
test_command.run()
UpperCamelCase = os.path.join(__snake_case ,'''README.md''' )
assert os.path.exists(__snake_case )
UpperCamelCase = DatasetInfosDict.from_directory(__snake_case )
UpperCamelCase = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 235_1563,
'''num_examples''': 1_0000,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_8418,
'''num_examples''': 1000,
},
] ,download_size=394_0680 ,dataset_size=258_9981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
UpperCamelCase , UpperCamelCase = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case )
if key == "num_bytes":
assert is_apercent_close(__snake_case ,__snake_case )
elif key == "splits":
assert list(__snake_case ) == list(__snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
            assert result == expected
| 34 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case="shi-labs/oneformer_demo" ) -> Any:
with open(hf_hub_download(__snake_case , __snake_case , repo_type="""dataset""" ) , """r""" ) as f:
_UpperCAmelCase = json.load(__snake_case )
_UpperCAmelCase = {}
_UpperCAmelCase = []
_UpperCAmelCase = []
for key, info in class_info.items():
_UpperCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(__snake_case ) )
_UpperCAmelCase = thing_ids
_UpperCAmelCase = class_names
return metadata
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]=7 , lowerCamelCase : str=3 , lowerCamelCase : Union[str, Any]=30 , lowerCamelCase : Optional[int]=400 , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : int=True , lowerCamelCase : List[str]=True , lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , lowerCamelCase : Tuple=10 , lowerCamelCase : str=False , lowerCamelCase : Union[str, Any]=255 , lowerCamelCase : Tuple="shi-labs/oneformer_demo" , lowerCamelCase : Tuple="ade20k_panoptic.json" , lowerCamelCase : Optional[Any]=10 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = class_info_file
_UpperCAmelCase = prepare_metadata(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = num_text
_UpperCAmelCase = repo_path
# for the post_process_functions
_UpperCAmelCase = 2
_UpperCAmelCase = 10
_UpperCAmelCase = 10
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = num_labels
_UpperCAmelCase = do_reduce_labels
_UpperCAmelCase = ignore_index
def lowerCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCamelCase ( self : Any , lowerCamelCase : int , lowerCamelCase : Tuple=False ) -> Any:
"""simple docstring"""
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size["""shortest_edge"""] * h / w )
_UpperCAmelCase = self.size["""shortest_edge"""]
elif w > h:
_UpperCAmelCase = self.size["""shortest_edge"""]
_UpperCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
_UpperCAmelCase = self.size["""shortest_edge"""]
_UpperCAmelCase = self.size["""shortest_edge"""]
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
_UpperCAmelCase = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_lowerCamelCase = image_processing_class
def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = OneFormerImageProcessorTester(self )
@property
def lowerCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """ignore_index""" ) )
self.assertTrue(hasattr(lowerCamelCase , """class_info_file""" ) )
self.assertTrue(hasattr(lowerCamelCase , """num_text""" ) )
self.assertTrue(hasattr(lowerCamelCase , """repo_path""" ) )
self.assertTrue(hasattr(lowerCamelCase , """metadata""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_reduce_labels""" ) )
def lowerCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase ( self : Tuple , lowerCamelCase : Tuple=False , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : Union[str, Any]="np" ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase = self.image_processing_tester.num_labels
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase )
if with_segmentation_maps:
_UpperCAmelCase = num_labels
if is_instance_map:
_UpperCAmelCase = list(range(lowerCamelCase ) ) * 2
_UpperCAmelCase = dict(enumerate(lowerCamelCase ) )
_UpperCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase = [Image.fromarray(lowerCamelCase ) for annotation in annotations]
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , lowerCamelCase , return_tensors="""pt""" , instance_id_to_semantic_id=lowerCamelCase , pad_and_return_pixel_mask=lowerCamelCase , )
return inputs
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def lowerCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
def common(lowerCamelCase : List[Any]=False , lowerCamelCase : List[Any]=None ):
_UpperCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCamelCase , is_instance_map=lowerCamelCase , segmentation_type=lowerCamelCase )
_UpperCAmelCase = inputs["""mask_labels"""]
_UpperCAmelCase = inputs["""class_labels"""]
_UpperCAmelCase = inputs["""pixel_values"""]
_UpperCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCamelCase )
common(is_instance_map=lowerCamelCase , segmentation_type="""pil""" )
common(is_instance_map=lowerCamelCase , segmentation_type="""pil""" )
def lowerCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = np.zeros((20, 50) )
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = binary_mask_to_rle(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
        _UpperCAmelCase = image_processor.post_process_semantic_segmentation(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_UpperCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _UpperCAmelCase = image_processor.post_process_semantic_segmentation(lowerCamelCase , target_sizes=lowerCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase = image_processor.post_process_instance_segmentation(lowerCamelCase , threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowerCamelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase = image_processor.post_process_panoptic_segmentation(lowerCamelCase , threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowerCamelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) | 108 | 0 |
"""simple docstring"""
from itertools import count
def solution( min_block_length = 50 ):
    """simple docstring"""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
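# Sanity check (assumption: with min_block_length=3 the recurrence above matches
# Project Euler problem 114, where a row of length 7 admits exactly 17 fillings).
# This helper re-runs the same recurrence for one fixed row length instead of
# searching for the first length whose count exceeds one million.
def _fill_count(row_length, min_block_length=3):
    fns = [1] * min_block_length
    for n in range(min_block_length, row_length + 1):
        fns.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fns[n] += fns[n - block_start - block_length - 1]
            fns[n] += 1
    return fns[row_length]
assert _fill_count(7) == 17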
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = 2
UpperCAmelCase = 99
UpperCAmelCase = 0
UpperCAmelCase = 32
UpperCAmelCase = 2
UpperCAmelCase = 4
UpperCAmelCase = 0.1
UpperCAmelCase = 0.1
UpperCAmelCase = 512
UpperCAmelCase = 16
UpperCAmelCase = 2
UpperCAmelCase = 0.02
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = """last"""
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = 0
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
UpperCAmelCase = None
if self.use_input_lengths:
UpperCAmelCase = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertModel(config=A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase = model(A )
UpperCAmelCase = [input_ids, input_mask]
UpperCAmelCase = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertWithLMHeadModel(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForSequenceClassification(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFFlaubertForTokenClassification(config=A )
UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFFlaubertForMultipleChoice(config=A )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCamelCase ( self ):
UpperCAmelCase = TFFlaubertModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFFlaubertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCAmelCase = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !"
UpperCAmelCase = model(A )[0]
UpperCAmelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape ,A )
# compare the actual values for a slice.
UpperCAmelCase = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 74 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
@slow
@require_torch
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE = tokenizer.sep_token_id
SCREAMING_SNAKE_CASE = tokenizer.cls_token_id
SCREAMING_SNAKE_CASE = 128
SCREAMING_SNAKE_CASE = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
SCREAMING_SNAKE_CASE = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) )
SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) )
SCREAMING_SNAKE_CASE = 4
def _map_to_encoder_decoder_inputs(lowerCAmelCase__ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE = tokenizer(batch['article'] , padding='max_length' , truncation=lowerCAmelCase__ , max_length=512 )
SCREAMING_SNAKE_CASE = tokenizer(batch['highlights'] , padding='max_length' , truncation=lowerCAmelCase__ , max_length=128 )
SCREAMING_SNAKE_CASE = inputs.input_ids
SCREAMING_SNAKE_CASE = inputs.attention_mask
SCREAMING_SNAKE_CASE = outputs.input_ids
SCREAMING_SNAKE_CASE = outputs.input_ids.copy()
SCREAMING_SNAKE_CASE = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
SCREAMING_SNAKE_CASE = outputs.attention_mask
assert all(len(lowerCAmelCase__ ) == 512 for x in inputs.input_ids )
assert all(len(lowerCAmelCase__ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = pred.label_ids
SCREAMING_SNAKE_CASE = pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase__ ) )] ) / len(lowerCAmelCase__ )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
SCREAMING_SNAKE_CASE = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase__ , per_device_train_batch_size=lowerCAmelCase__ , per_device_eval_batch_size=lowerCAmelCase__ , predict_with_generate=lowerCAmelCase__ , evaluation_strategy='steps' , do_train=lowerCAmelCase__ , do_eval=lowerCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
# start training
trainer.train()
| 247 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : Tuple = """FlavaImageProcessor"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> int:
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(
lowerCAmelCase__ , return_image_mask=lowerCAmelCase__ , return_codebook_pixels=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self ) -> Union[str, Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase__ , )
return self.image_processor_class
@property
def __A ( self ) -> str:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase__ , )
return self.image_processor
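# Usage sketch (the checkpoint name is illustrative, not taken from this file):
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# Text-only and image-only calls are also valid; passing neither raises the
# ValueError guarded at the top of __call__.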
| 247 | 1 |
from __future__ import annotations
def get_valid_pos( position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position )
    return permissible_positions
def is_complete( board: list[list[int]] ) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n: int ) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = f"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg )
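# Minimal usage sketch for the functions above: a 1x1 board is trivially toured,
# while n = 2, 3 and 4 raise ValueError because no open knight's tour exists on
# boards that small.
assert open_knight_tour(1 ) == [[1]]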
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
from __future__ import annotations
import bisect
def bisect_left( sorted_collection , item , lo = 0 , hi = -1 ):
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection , item , lo = 0 , hi = -1 ):
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection , item , lo = 0 , hi = -1 ):
    '''simple docstring'''
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection , item , lo = 0 , hi = -1 ):
    '''simple docstring'''
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection , item ):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection , item ):
    '''simple docstring'''
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection , item , left , right ):
    '''simple docstring'''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
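# Usage sketch for the helpers above: bisect_left/bisect_right return the
# first/last valid insertion point, insort_left inserts while keeping the list
# sorted, and both search variants return the index of the item or None.
_data = [0, 5, 7, 10, 15]
assert binary_search(_data , 10 ) == 3
assert binary_search_by_recursion(_data , 15 , 0 , len(_data ) - 1 ) == 4
assert bisect_left(_data , 6 ) == bisect_right(_data , 6 ) == 2
insort_left(_data , 6 )
assert _data == [0, 5, 6, 7, 10, 15]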
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = sorted(int(item) for item in user_input.split(''','''))
    target = int(input('''Enter a single number to be found in the list:\n'''))
    result = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 452 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
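# Usage sketch: with the _LazyModule above, importing the package stays cheap and
# the torch-backed classes are only materialized on first attribute access, e.g.:
#     from transformers.models.biogpt import BioGptConfig  # triggers the real import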
| 92 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase__ : Any = "mvp"
lowerCAmelCase__ : str = ["past_key_values"]
lowerCAmelCase__ : List[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' ) | 125 | 0 |
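
# Hedged usage sketch: upstream this configuration class ships in `transformers`
# as MvpConfig, so a typical round trip looks like the following (assumes the
# package is installed; values are illustrative).
def _mvp_config_demo():
    from transformers import MvpConfig

    cfg = MvpConfig(encoder_layers=6, decoder_layers=6)
    # attribute_map above routes num_attention_heads -> encoder_attention_heads.
    print(cfg.num_attention_heads)  # 16 by default
    cfg.save_pretrained("./mvp-config")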
'''simple docstring'''
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
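
# Two extra hand-checkable cases (same coin set): 5 pence can be formed as
# {5, 1+2+2, 1+1+1+2, 1+1+1+1+1}, i.e. 4 ways.
assert solution(5) == 4
assert solution(10) == 11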
if __name__ == "__main__":
assert solution(200) == 73682 | 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
'''simple docstring'''
@property
def lowerCamelCase ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCamelCase ( self ):
UpperCAmelCase__ : Dict = self.dummy_uncond_unet
UpperCAmelCase__ : Dict = KarrasVeScheduler()
UpperCAmelCase__ : Any = KarrasVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ : Dict = torch.manual_seed(0 )
UpperCAmelCase__ : Dict = pipe(num_inference_steps=2 , generator=_UpperCAmelCase , output_type='''numpy''' ).images
UpperCAmelCase__ : List[Any] = torch.manual_seed(0 )
UpperCAmelCase__ : List[Any] = pipe(num_inference_steps=2 , generator=_UpperCAmelCase , output_type='''numpy''' , return_dict=_UpperCAmelCase )[0]
UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
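
# Hedged stand-alone sketch of the fast path exercised above, using diffusers'
# public names (UNet2DModel / KarrasVeScheduler / KarrasVePipeline): random
# weights, two denoising steps, no hub download.
def _karras_ve_smoke_test():
    import torch
    from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    pipe.set_progress_bar_config(disable=True)
    images = pipe(num_inference_steps=2, generator=torch.manual_seed(0), output_type="numpy").images
    assert images.shape == (1, 32, 32, 3)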
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def lowerCamelCase ( self ):
UpperCAmelCase__ : Optional[int] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(_UpperCAmelCase )
UpperCAmelCase__ : List[Any] = KarrasVeScheduler()
UpperCAmelCase__ : int = KarrasVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = pipe(num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''numpy''' ).images
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase__ : Any = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 599 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    '''simple docstring'''

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
def __call__( self: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Dict ):
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
UpperCamelCase_ =self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None:
UpperCamelCase_ =self.char_tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase_ =encodings["input_ids"]
return inputs
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: Tuple ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =sequences
UpperCamelCase_ =char_preds.size(0 )
UpperCamelCase_ , UpperCamelCase_ =self._decode_helper(UpperCamelCase_ , "char" )
UpperCamelCase_ , UpperCamelCase_ =self._decode_helper(UpperCamelCase_ , "bpe" )
UpperCamelCase_ , UpperCamelCase_ =self._decode_helper(UpperCamelCase_ , "wp" )
UpperCamelCase_ =[]
UpperCamelCase_ =[]
for i in range(UpperCamelCase_ ):
UpperCamelCase_ =[char_scores[i], bpe_scores[i], wp_scores[i]]
UpperCamelCase_ =[char_strs[i], bpe_strs[i], wp_strs[i]]
UpperCamelCase_ =scores.index(max(UpperCamelCase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
UpperCamelCase_ ={}
UpperCamelCase_ =final_strs
UpperCamelCase_ =final_scores
UpperCamelCase_ =char_strs
UpperCamelCase_ =bpe_strs
UpperCamelCase_ =wp_strs
return out
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] ):
if format == DecodeType.CHARACTER:
UpperCamelCase_ =self.char_decode
UpperCamelCase_ =1
UpperCamelCase_ ="[s]"
elif format == DecodeType.BPE:
UpperCamelCase_ =self.bpe_decode
UpperCamelCase_ =2
UpperCamelCase_ ="#"
elif format == DecodeType.WORDPIECE:
UpperCamelCase_ =self.wp_decode
UpperCamelCase_ =102
UpperCamelCase_ ="[SEP]"
else:
raise ValueError(f"""Format {format} is not supported.""" )
UpperCamelCase_ , UpperCamelCase_ =[], []
UpperCamelCase_ =pred_logits.size(0 )
UpperCamelCase_ =pred_logits.size(1 )
UpperCamelCase_ , UpperCamelCase_ =pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase_ , sorted=UpperCamelCase_ )
UpperCamelCase_ =preds_index.view(-1 , UpperCamelCase_ )[:, 1:]
UpperCamelCase_ =decoder(UpperCamelCase_ )
UpperCamelCase_ , UpperCamelCase_ =torch.nn.functional.softmax(UpperCamelCase_ , dim=2 ).max(dim=2 )
UpperCamelCase_ =preds_max_prob[:, 1:]
for index in range(UpperCamelCase_ ):
UpperCamelCase_ =preds_str[index].find(UpperCamelCase_ )
UpperCamelCase_ =preds_str[index][:pred_eos]
UpperCamelCase_ =preds_index[index].cpu().tolist()
UpperCamelCase_ =pred_index.index(UpperCamelCase_ ) if eos_token in pred_index else -1
UpperCamelCase_ =preds_max_prob[index][: pred_eos_index + 1]
UpperCamelCase_ =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase_ )
conf_scores.append(UpperCamelCase_ )
return dec_strs, conf_scores
def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: List[Any] ):
UpperCamelCase_ =[seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase_ )]
return decode_strs
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] ):
return self.bpe_tokenizer.batch_decode(UpperCamelCase_ )
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] ):
UpperCamelCase_ =[seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase_ )]
return decode_strs
| 391 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
A_ = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''

    model_input_names = ["input_features", "attention_mask"]
def __init__( self: Tuple , UpperCamelCase_: List[Any]=80 , UpperCamelCase_: int=1_6000 , UpperCamelCase_: Optional[int]=80 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Any=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[Any]=True , **UpperCamelCase_: int , ):
super().__init__(feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase_ =num_mel_bins
UpperCamelCase_ =do_ceptral_normalize
UpperCamelCase_ =normalize_means
UpperCamelCase_ =normalize_vars
UpperCamelCase_ =True
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: np.ndarray , ):
UpperCamelCase_ =waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCamelCase_ =torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 )
UpperCamelCase_ =ta_kaldi.fbank(UpperCamelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def UpperCamelCase__ ( UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: float = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
UpperCamelCase_ =x[:input_length].mean(axis=0 )
UpperCamelCase_ =np.subtract(UpperCamelCase_ , UpperCamelCase_ )
if normalize_vars:
UpperCamelCase_ =x[:input_length].std(axis=0 )
UpperCamelCase_ =np.divide(UpperCamelCase_ , UpperCamelCase_ )
if input_length < x.shape[0]:
UpperCamelCase_ =padding_value
# make sure array is in float32
UpperCamelCase_ =x.astype(np.floataa )
return x
def UpperCamelCase__ ( self: Any , UpperCamelCase_: List[np.ndarray] , UpperCamelCase_: Optional[np.ndarray] = None ):
UpperCamelCase_ =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCamelCase_ , UpperCamelCase_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCamelCase_ , UpperCamelCase_ )
]
def __call__( self: Dict , UpperCamelCase_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_: Union[bool, str, PaddingStrategy] = False , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[bool] = None , **UpperCamelCase_: Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase_ =isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase_ =is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase_ =[np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
UpperCamelCase_ =np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase_ =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase_ =[raw_speech]
# extract fbank features
UpperCamelCase_ =[self._extract_fbank_features(UpperCamelCase_ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCamelCase_ =BatchFeature({"input_features": features} )
UpperCamelCase_ =self.pad(
UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
# make sure list is in array format
UpperCamelCase_ =padded_inputs.get("input_features" )
if isinstance(input_features[0] , UpperCamelCase_ ):
UpperCamelCase_ =[np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_features]
UpperCamelCase_ =padded_inputs.get("attention_mask" )
if attention_mask is not None:
UpperCamelCase_ =[np.asarray(UpperCamelCase_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCamelCase_ =(
np.array(UpperCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(UpperCamelCase_ , max_length=UpperCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCamelCase_ =self.normalize(
padded_inputs["input_features"] , attention_mask=UpperCamelCase_ )
if return_tensors is not None:
UpperCamelCase_ =padded_inputs.convert_to_tensors(UpperCamelCase_ )
return padded_inputs
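
# Hedged usage sketch: upstream this extractor is Speech2TextFeatureExtractor
# (the Kaldi fbank call above needs torchaudio); dummy audio keeps it offline.
def _fbank_demo():
    import numpy as np
    from transformers import Speech2TextFeatureExtractor

    extractor = Speech2TextFeatureExtractor(feature_size=80, num_mel_bins=80, sampling_rate=16000)
    speech = np.random.default_rng(0).standard_normal(16000).astype(np.float32)  # 1 s of noise
    inputs = extractor(speech, sampling_rate=16000, return_tensors="np")
    print(inputs.input_features.shape)  # (1, num_frames, 80)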
| 391 | 1 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
def __init__( self , *a_ , a_=None , a_=None , **a_ ):
super().__init__(*a_ , **a_ )
a_ : int = eval_examples
a_ : Optional[Any] = post_process_function
def snake_case_ ( self , a_=None , a_=None , a_=None , a_ = "eval" ):
a_ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
a_ : List[Any] = self.get_eval_dataloader(a_ )
a_ : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a_ : int = self.compute_metrics
a_ : List[Any] = None
a_ : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Optional[int] = time.time()
try:
a_ : List[str] = eval_loop(
a_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a_ , metric_key_prefix=a_ , )
finally:
a_ : Any = compute_metrics
a_ : Optional[Any] = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a_ , a_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a_ : Optional[int] = self.post_process_function(a_ , a_ , output.predictions )
a_ : Tuple = self.compute_metrics(a_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
a_ : Tuple = metrics.pop(a_ )
metrics.update(output.metrics )
else:
a_ : Any = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(a_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a_ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , a_ )
return metrics
def snake_case_ ( self , a_ , a_ , a_=None , a_ = "test" ):
a_ : Tuple = self.get_test_dataloader(a_ )
# Temporarily disable metric computation, we will do it in the loop here.
a_ : List[Any] = self.compute_metrics
a_ : List[str] = None
a_ : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : List[str] = time.time()
try:
a_ : Tuple = eval_loop(
a_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a_ , metric_key_prefix=a_ , )
finally:
a_ : int = compute_metrics
a_ : Any = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a_ , a_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a_ : str = self.post_process_function(a_ , a_ , output.predictions , "predict" )
a_ : Any = self.compute_metrics(a_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
a_ : Optional[int] = metrics.pop(a_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a_ ) | 370 |
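
# The trainer subclass above hinges on one pattern: stash a hook, null it out
# so the inner loop skips per-batch metrics, and restore it in `finally`. A
# minimal self-contained sketch of that pattern:
class _HookedEvaluator:
    def __init__(self, compute_metrics):
        self.compute_metrics = compute_metrics

    def evaluate(self, predictions):
        compute_metrics = self.compute_metrics
        self.compute_metrics = None  # the loop must not call metrics per batch
        try:
            processed = [p * 2 for p in predictions]  # stand-in for the eval loop
        finally:
            self.compute_metrics = compute_metrics  # restored even on error
        return self.compute_metrics(processed)

assert _HookedEvaluator(sum).evaluate([1, 2, 3]) == 12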
"""simple docstring"""
def temp_input_value(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
    def answer(number: int) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main() | 370 | 1 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=64 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = embedding_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> str:
'''simple docstring'''
_UpperCamelCase = MobileBertModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = MobileBertForMaskedLM(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> str:
'''simple docstring'''
_UpperCamelCase = MobileBertForNextSentencePrediction(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = MobileBertForPreTraining(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = MobileBertForQuestionAnswering(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = MobileBertForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> int:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = MobileBertForTokenClassification(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = MobileBertForMultipleChoice(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def UpperCAmelCase ( self , __a , __a , __a=False) -> Tuple:
'''simple docstring'''
_UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a)
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a)
return inputs_dict
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = MobileBertModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
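
# Hedged offline smoke test mirroring the tester above: a tiny random-weight
# MobileBertModel built from a config, so no checkpoint download is needed.
def _mobilebert_smoke_test():
    import torch
    from transformers import MobileBertConfig, MobileBertModel

    config = MobileBertConfig(
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
    )
    model = MobileBertModel(config).eval()
    input_ids = torch.randint(0, 99, (1, 7))
    with torch.no_grad():
        out = model(input_ids)
    assert out.last_hidden_state.shape == (1, 7, 64)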
| 19 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
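
# A runnable mini-sketch of the optional-dependency guard pattern used above
# (standalone stand-ins, not transformers' actual helpers): probe the backend
# and only register backend-specific exports when it imports cleanly.
class _OptionalDependencyNotAvailable(Exception):
    pass

def _torch_importable():
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

_exports = ["WhisperConfig", "WhisperFeatureExtractor"]  # always importable
try:
    if not _torch_importable():
        raise _OptionalDependencyNotAvailable()
except _OptionalDependencyNotAvailable:
    pass  # torch-backed classes simply stay unregistered
else:
    _exports.append("WhisperModel")
print(_exports)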
| 160 | 0 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
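
# Quick extra check: [[2, 1], [1, 2]] has eigenvalues 3 and 1, so power
# iteration from a generic start vector should report ~3.0.
_m = np.array([[2.0, 1.0], [1.0, 2.0]])
_eigen_value, _ = power_iteration(_m, np.array([1.0, 0.0]))
assert abs(_eigen_value - 3.0) <= 1e-6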
| 710 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a__ = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
a__ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a__ = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a__ = """allenai"""
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
# handle various types of models
_snake_case : Optional[Any] = basename(SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = dirname(SCREAMING_SNAKE_CASE__ )
_snake_case : int = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_snake_case : List[str] = cls.hub_models()
_snake_case : Tuple = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
_snake_case : Dict = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
_snake_case : List[Any] = hub_utils.from_pretrained(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , archive_map=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = vars(chkpt["""args"""]["""model"""] )
_snake_case : Union[str, Any] = args["""source_lang"""]
_snake_case : Tuple = args["""target_lang"""]
_snake_case : Any = dirname(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = basename(SCREAMING_SNAKE_CASE__ )
# dicts
_snake_case : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dict.{src_lang}.txt''' )
_snake_case : Any = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dict.{tgt_lang}.txt''' )
_snake_case : List[Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[Any] = rewrite_dict_keys(src_dict.indices )
_snake_case : Dict = len(SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab-src.json""" )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_snake_case : str = True
for k in src_vocab.keys():
if not k.islower():
_snake_case : Any = False
break
_snake_case : Union[str, Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
_snake_case : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab-tgt.json""" )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# merges_file (bpecodes)
_snake_case : str = os.path.join(SCREAMING_SNAKE_CASE__ , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_snake_case : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
break
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as fin:
_snake_case : Dict = fin.read()
_snake_case : Optional[Any] = re.sub(R""" \d+$""" , """""" , SCREAMING_SNAKE_CASE__ , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as fout:
fout.write(SCREAMING_SNAKE_CASE__ )
# model config
_snake_case : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args['tokenizer']}'''
_snake_case : Optional[int] = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.0_2,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
_snake_case : Tuple = 5
_snake_case : int = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_snake_case : List[str] = best_score_hparams[model_dir]["""length_penalty"""]
else:
_snake_case : Optional[Any] = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# tokenizer config
_snake_case : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : str = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1_024,
"""do_lower_case""": do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
# model
_snake_case : Optional[Any] = chkpt["""models"""][0]
_snake_case : List[str] = model.state_dict()
# rename keys to start with 'model.'
_snake_case : Any = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_snake_case : Union[str, Any] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = FSMTConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = FSMTForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# check that it loads ok
model_new.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
# save
_snake_case : int = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
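
# Typical invocation, with paths illustrative (the checkpoint layout must match
# the fairseq dump this script expects; the script filename is an assumption
# based on the transformers convention for FSMT conversion scripts):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19-ru-en/model4.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en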
| 198 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {'vocab_file': 'sentencepiece.model'}
_lowerCamelCase : List[Any] = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowerCamelCase : str = {
'google/rembert': 2_56,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple=False , _UpperCamelCase : Tuple=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[Any]="[CLS]" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Dict="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[str]="[PAD]" , _UpperCamelCase : Optional[int]="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , **_UpperCamelCase : List[Any] , ) -> int:
'''simple docstring'''
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor()
self.sp_model.Load(lowerCAmelCase_ )
@property
def __snake_case( self : str ) -> Any:
'''simple docstring'''
return len(self.sp_model )
def __snake_case( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str]=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(lowerCAmelCase_ )
return pieces
def __snake_case( self : int , _UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.PieceToId(lowerCAmelCase_ )
def __snake_case( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCAmelCase_ )
def __snake_case( self : Tuple , _UpperCamelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.sp_model.decode_pieces(lowerCAmelCase_ )
return out_string
def __snake_case( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __snake_case( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def __snake_case( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowerCAmelCase_ ) )
return
SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
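
# Hedged usage sketch: upstream this tokenizer is RemBertTokenizer; loading the
# hub checkpoint referenced above needs network access and sentencepiece.
def _rembert_tokenizer_demo():
    from transformers import RemBertTokenizer

    tok = RemBertTokenizer.from_pretrained("google/rembert")
    enc = tok("Hello world")
    # The [CLS] ... [SEP] framing comes from build_inputs_with_special_tokens above.
    print(tok.convert_ids_to_tokens(enc["input_ids"]))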
| 403 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        # Keep the backend normalizer in sync with the requested casing/accent options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Add [CLS]/[SEP] around one or two sequences."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        """Token type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
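# --- Added usage sketch (not in the original snippet; the checkpoint name comes from the maps above) ---
# tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
# enc = tokenizer("first segment", "second segment")
# enc["token_type_ids"]  # 0s over [CLS] first [SEP], 1s over second [SEP]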
| 22 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Read JSON / JSON Lines files into a `Dataset`, optionally streaming."""

    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, field=None, num_proc=None, **kwargs) -> None:
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        # In streaming mode, wrap the builder directly; otherwise download and prepare first.
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    """Write a `Dataset` to JSON Lines, batch by batch (optionally in parallel)."""

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs) -> None:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)
        return written
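# --- Added usage sketch (not in the original module; variable names are illustrative) ---
# from datasets import Dataset
# ds = Dataset.from_dict({"a": [1, 2, 3]})
# JsonDatasetWriter(ds, "out.jsonl", batch_size=2).write()  # writes one JSON object per line
# JsonDatasetReader("out.jsonl").read()                     # round-trips the rows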
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """Configuration for a ViT-MSN model."""

    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
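# --- Added usage sketch (not in the original snippet) ---
# config = ViTMSNConfig()                # base ViT-MSN defaults
# config.hidden_size, config.patch_size  # -> (768, 16)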
| 385 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Copy/paste/tweak fairseq RoBERTa weights into our BERT-style structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
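# --- Added CLI sketch (not part of the original script; the file name and paths are placeholders) ---
# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#     --pytorch_dump_folder_path ./xlm-roberta-xl [--classification_head]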
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 67 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
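# --- Added usage sketch (not in the original snippet; values are only illustrative) ---
# 1 mol of an ideal gas at 273.15 K in a 0.0224 m^3 vessel exerts roughly 101 kPa:
# pressure_of_gas_system(1.0, 273.15, 0.0224)  # ~101388 Pa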
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 142 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], encoded_pair, )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__a = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670", )
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode raw audio bytes with ffmpeg into a float32 numpy array."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = "f32le" ,) -> Union[str, Any]:
lowerCamelCase_ = f'''{sampling_rate}'''
lowerCamelCase_ = '1'
if format_for_conversion == "s16le":
lowerCamelCase_ = 2
elif format_for_conversion == "f32le":
lowerCamelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase_ = platform.system()
if system == "Linux":
lowerCamelCase_ = 'alsa'
lowerCamelCase_ = 'default'
elif system == "Darwin":
lowerCamelCase_ = 'avfoundation'
lowerCamelCase_ = ':0'
elif system == "Windows":
lowerCamelCase_ = 'dshow'
lowerCamelCase_ = 'default'
lowerCamelCase_ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowerCamelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase_ = _ffmpeg_stream(__UpperCamelCase ,__UpperCamelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le", ):
    """Like `ffmpeg_microphone`, but yields overlapping, stride-annotated numpy chunks."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Read raw bytes from `iterator` and yield overlapping `chunk_len`-byte chunks.

    `stride` is the (left, right) overlap in bytes; the first chunk has no left stride.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run ffmpeg and yield its stdout in `buflen`-byte reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
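# --- Added illustrative demo (not part of the original module) ---
# chunk_bytes_iter with chunk_len=6 and stride=(2, 2) keeps a 2-byte overlap between chunks:
# list(chunk_bytes_iter(iter([b"abcdefgh"]), 6, stride=(2, 2)))
#   -> [{'raw': b'abcdef', 'stride': (0, 2)},
#       {'raw': b'cdefgh', 'stride': (2, 2)},
#       {'raw': b'efgh',   'stride': (2, 0)}]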
| 42 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 86: least cuboid size M such that the number of cuboids with
    an integer shortest path first exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
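# --- Added note (not in the original snippet) ---
# With the default limit of one million, solution() evaluates to 1818
# (the published Project Euler 86 answer).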
if __name__ == "__main__":
print(f'''{solution() = }''')
| 596 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of `n`."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Sum of all amicable numbers below `n` (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
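# --- Added note (not in the original snippet) ---
# solution(10000) == 31626, the classic Project Euler 21 answer
# (220 + 284 + 1184 + 1210 + 2620 + 2924 + 5020 + 5564 + 6232 + 6368).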
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 713 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Bark processor: wraps a tokenizer and optional speaker-embedding presets."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None) -> None:
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.")
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , _SCREAMING_SNAKE_CASE="speaker_embeddings" , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """v2""" ) , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = {}
UpperCAmelCase : Union[str, Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCAmelCase : Optional[Any] = self._load_voice_preset(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , _SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
UpperCAmelCase : Tuple = tmp_dict
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
super().save_pretrained(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].")

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key], subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.")

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding="max_length", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
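# --- Added usage sketch (not in the original module; checkpoint and preset names are assumptions) ---
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
# sorted(inputs.keys())  # tokenizer outputs plus "history_prompt" when a preset is given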
| 359 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    # Heron's formula
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 491 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ])

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
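# --- Added usage sketch (not in the original module; shapes are only illustrative) ---
# model = TransformerTemporalModel(in_channels=32, num_attention_heads=4, attention_head_dim=8)
# frames = torch.randn(2 * 4, 32, 8, 8)    # (batch * num_frames, channels, h, w)
# out = model(frames, num_frames=4).sample  # same shape as the input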
| 491 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray | None = None, ) -> np.ndarray:
    """Schur complement of the block matrix [[A, B], [B.T, C]]: S = C - B.T A^{-1} B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
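# --- Added note (not in the original snippet) ---
# Determinant identity exercised by the tests below: for X = [[A, B], [B.T, C]],
# det(X) = det(A) * det(schur_complement(A, B, C)).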
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 605 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform mapping `pt1` onto `pt2`."""
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
# read original image
__lowercase = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
__lowercase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowercase , __lowercase = gray_img.shape
# set different points to rotate image
__lowercase = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
__lowercase = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
__lowercase = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
__lowercase = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
__lowercase = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowercase = plt.figure(1)
__lowercase = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 605 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        model_card, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 572 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
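# Quick numerical check (illustrative only): with n = 5 samples of which c = 2
# pass, the unbiased pass@1 estimate reduces to c / n:
#   1 - (1 - 1/4) * (1 - 1/5) = 1 - 3/5 = 0.4
#
#   estimate_pass_at_k(num_samples=[5], num_correct=[2], k=1)  # -> array([0.4])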
| 572 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
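# Illustrative usage (assumes Hub access; exact ids depend on the checkpoint):
#
#   from transformers import AlbertTokenizerFast
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   enc = tok("first sentence", "second sentence")
#   enc["input_ids"]       # [CLS] ids_0 [SEP] ids_1 [SEP]
#   enc["token_type_ids"]  # 0s for the first segment, 1s for the second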
| 254 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
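if __name__ == "__main__":
    # Small self-contained demo (the feature layout below is chosen for this
    # example, not taken from the module): time how long it takes to
    # materialize a tiny dataset containing a 2D array feature.
    import tempfile

    from datasets.features import Array2D, Features, Value

    @get_duration
    def write_small_dataset(path):
        features = Features({"image": Array2D(shape=(4, 4), dtype="float32"), "text": Value("string")})
        generate_example_dataset(path, features, num_examples=10)

    with tempfile.TemporaryDirectory() as tmp_dir:
        print(f"took {write_small_dataset(tmp_dir + '/data.arrow'):.4f}s")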
| 254 | 1 |
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND: returns 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 263 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """
    Expose model information
    """

    infos: dict


class ServeTokenizeResult(BaseModel):
    """
    Tokenize result model
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """
    DeTokenize result model
    """

    text: str


class ServeForwardResult(BaseModel):
    """
    Forward result model
    """

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually returns corresponding tokens ids:
        - **text_input**: String to tokenize
        - **return_ids**: Boolean flag indicating if the tokens have to be converted to their integer mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text:
        - **tokens_ids**: List of tokens ids
        - **skip_special_tokens**: Flag indicating to not try to decode special tokens
        - **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        **inputs**: Inputs to run through the pipeline.
        """
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)}) | 458 | 0 |
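# Illustrative client sketch (assumes the server was started with something
# like `transformers-cli serve --task sentiment-analysis --port 8888`; the
# payload keys follow the Body(..., embed=True) parameters above):
#
#   import requests
#   base = "http://localhost:8888"
#   requests.post(f"{base}/tokenize", json={"text_input": "Hello world", "return_ids": True}).json()
#   requests.post(f"{base}/forward", json={"inputs": "Hello world"}).json()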
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 639 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 639 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1f\x8b"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2f\xfd"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5a\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4d\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated") -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, BaseExtractor):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor_format = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
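# Illustrative usage of the registry above (the paths are placeholders):
#
#   fmt = Extractor.infer_extractor_format("/tmp/example.tar.gz")  # e.g. "gzip"
#   if fmt:
#       Extractor.extract("/tmp/example.tar.gz", "/tmp/example_extracted", extractor_format=fmt)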
| 344 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old in_proj weight/bias hold q, k and v stacked along dim 0
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 344 | 1 |
import numpy as np
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = int(np.ceil((x_end - xa) / h ) )
SCREAMING_SNAKE_CASE__ = np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE__ = ya
SCREAMING_SNAKE_CASE__ = xa
for k in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = f(__UpperCamelCase , y[k] )
SCREAMING_SNAKE_CASE__ = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__ = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__ = f(x + h , y[k] + h * ka )
SCREAMING_SNAKE_CASE__ = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
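if __name__ == "__main__":
    # Usage sketch: integrate y' = y with y(0) = 1; the final value should
    # approximate e ≈ 2.718281 for this step size.
    def exponential(x, y):
        return y

    approx = runge_kutta(exponential, y0=1.0, x0=0.0, x_end=1.0, h=0.01)
    print(approx[-1])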
| 379 | import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''openbmb/cpm-ant-10b''': 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first wordpiece tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
def __a ( self : Union[str, Any] , _lowercase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
for x in jieba.cut(_lowercase , cut_all=_lowercase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_lowercase ) )
return output_tokens
def __a ( self : int , _lowercase : Any , **_lowercase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE__ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_lowercase , **_lowercase )
    def check( self : Optional[int] , token : List[Any] ):
        """simple docstring"""
        return token in self.encoder
    def convert_tokens_to_string( self : List[str] , tokens : List[str] ):
        """simple docstring"""
        return "".join(tokens )
    def _convert_token_to_id( self : Optional[int] , token : Any ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self : Tuple , index : List[Any] ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["""</_>"""] = self.encoder[""" """]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["""</n>"""] = self.encoder["""\n"""]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
return (vocab_file,)
    def build_inputs_with_special_tokens( self : int , token_ids_0 : List[int] , token_ids_1 : List[int] = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 379 | 1 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('''s3fs''') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path : str ) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
def is_remote_filesystem( fs : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename( fs : fsspec.AbstractFileSystem , src : str , dst : str ):
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( ) -> None:
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock() | 8 |
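# Usage sketch for the helpers above (extract_path_from_uri and
# is_remote_filesystem, as de-obfuscated); the URIs are illustrative only.
assert extract_path_from_uri("s3://bucket/dataset" ) == "bucket/dataset"
assert extract_path_from_uri("relative/path" ) == "relative/path"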
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ = """\
"""
lowerCAmelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCAmelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute( self , input_texts , model_id , batch_size = 16 , add_start_token = True , device=None ) -> Optional[int]:
        '''simple docstring'''
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="pt" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 514 | 0 |
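# Self-contained sketch of the masked per-sequence perplexity math used in
# _compute above (random tensors, shapes illustrative only). The metric above
# exponentiates with base 2 via torch.exp2; base e is the other common
# convention, and only the log base differs.
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(2 , 5 , 10 )                       # (batch, seq_len, vocab)
labels = torch.randint(0 , 10 , (2, 5) )
mask = torch.ones(2 , 5 )
nll = CrossEntropyLoss(reduction="none" )(logits.transpose(1 , 2 ) , labels )
ppl = torch.exp((nll * mask).sum(1 ) / mask.sum(1 ) )   # one perplexity per sequence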
'''simple docstring'''
import numpy as np
def runge_kutta( f , ya , xa , h , x_end ):
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
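# Sanity check for the runge_kutta routine above: integrate y' = y from x = 0
# to x = 1 with y(0) = 1; the exact endpoint value is e ~= 2.7182818.
ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1])  # ~2.7182818 for this step size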
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key( name ):
    if "cls_token" in name:
        name = name.replace("cls_token" , "vit.embeddings.cls_token" )
    if "mask_token" in name:
        name = name.replace("mask_token" , "decoder.mask_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "vit.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight" , "vit.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias" , "vit.layernorm.bias" )
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
    elif "huge" in checkpoint_url:
        config.patch_size = 1_4
        config.hidden_size = 1_2_8_0
        config.intermediate_size = 5_1_2_0
        config.num_hidden_layers = 3_2
        config.num_attention_heads = 1_6
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
    else:
        expected_slice = torch.tensor(
            [[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313 | 0 |
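# Standalone illustration of the fused-qkv split performed in convert_state_dict
# above: timm-style MAE checkpoints store one (3*dim, dim) qkv matrix that maps
# onto separate query/key/value projections of shape (dim, dim) each.
import torch

dim = 4
qkv = torch.randn(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)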
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__snake_case : List[str] =logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    '''simple docstring'''
    def __init__(self ,*args ,**kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 647 |
from itertools import product
def total_frequency_distribution( sides_number , dice_number ):
    '''simple docstring'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ):
    '''simple docstring'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
| 203 | 0 |
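# Independent cross-check of total_frequency_distribution above with a Counter
# over the same Cartesian product (cheap enough at these dice counts).
from collections import Counter

counts = Counter(sum(roll) for roll in product(range(1, 5), repeat=9))
freqs = total_frequency_distribution(sides_number=4, dice_number=9)
assert all(freqs[total] == count for total, count in counts.items())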
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset( Dataset ):
def __init__( self : Union[str, Any], SCREAMING_SNAKE_CASE_ : str="", SCREAMING_SNAKE_CASE_ : str="train" ):
assert os.path.isdir(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = []
snake_case : Dict = os.listdir(SCREAMING_SNAKE_CASE_ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
snake_case : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ):
continue
self.documents.append(SCREAMING_SNAKE_CASE_ )
def __len__( self : Any ):
return len(self.documents )
    def __getitem__( self : Dict, idx : Any ):
        document_path = self.documents[idx]
        document_name = document_path.split('''/''' )[-1]
        with open(document_path, encoding='''utf-8''' ) as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story( raw_story : str ):
    nonempty_lines = list(filter(lambda x : len(x ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('''@highlight''' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('''@highlight''' ) , lines ) )
    return story_lines, summary_lines
def _add_missing_period( line : List[Any] ):
    END_TOKENS = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
    if line.startswith('''@highlight''' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size( sequence , block_size , pad_token_id ):
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask( sequence , pad_token_id ):
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization( story_lines , summary_lines , tokenizer ):
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids( batch , separator_token_id ):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
| 703 |
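# Toy usage of the padding helpers above: pad a short id sequence to a block
# and derive its attention mask (pad_token_id = 0 is an arbitrary choice here).
seq = torch.tensor(fit_to_block_size([5, 6, 7], block_size=6, pad_token_id=0))
mask = build_mask(seq, pad_token_id=0)
# seq  -> tensor([5, 6, 7, 0, 0, 0]); mask -> tensor([1, 1, 1, 0, 0, 0])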
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path : str , pytorch_dump_folder_path : str ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
snake_case : Optional[Any] = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
snake_case : Tuple = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
snake_case : List[str] = old_model.bias
logger.info(F"""{attribute} is initialized""" )
snake_case : Tuple = True
break
            elif attribute in special_keys and hasattr(old_model , '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F"""{old_model} does not have {old_attribute}""" )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 555 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    text_column: str = """text"""
    label_column: str = """labels"""
    def align_with_features( self : int , features : Optional[int] ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template
@property
    def column_mapping( self : Optional[int] ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 582 |
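# Usage sketch for the task template above: aligning with a concrete schema
# swaps the generic ClassLabel placeholder in label_schema for the dataset's
# own label feature (the label names here are illustrative).
task = TextClassification()
features = Features({"text": Value("string" ), "labels": ClassLabel(names=["neg", "pos"] )} )
aligned = task.align_with_features(features )
assert aligned.label_schema["labels"].names == ["neg", "pos"]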
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def base64_encode( data ):
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = B"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = B""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode( encoded_data ):
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition( number_to_partition ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret : set[int] = set()
    prime : int
    sub : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution( number_unique_partitions = 5_0_0_0 ) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""") | 182 |
def least_divisible_repunit( divisor ) -> int:
    """simple docstring"""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (1_0 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution( limit = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""") | 182 | 1 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys( s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('''transformer_layers''' , '''layers''' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('''subsample''' , '''conv''' )] = s_dict.pop(key )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    args = mam_aaa['''args''']
    state_dict = mam_aaa['''model''']
    lm_head_weights = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(''',''' )]
    config = Speech2TextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = Speech2TextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 253 |
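# Usage sketch of make_linear_from_emb above: the decoder embedding matrix is
# reused as the output projection, so logits land back in vocabulary space
# (the sizes below are illustrative).
import torch
from torch import nn

emb = nn.Embedding(100, 16)
lm_head = make_linear_from_emb(emb)
hidden = torch.randn(2, 16)
assert lm_head(hidden).shape == (2, 100)   # projects onto the 100-word vocab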
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name( ) -> Any:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code( ) -> Any:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / '''datasets''' / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f"{script_name}.py"
    with open(script_path , '''w''' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
| 253 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 1_2_8}
class BlenderbotTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self : str , vocab_file : Dict=None , merges_file : str=None , tokenizer_file : Union[str, Any]=None , errors : Any="replace" , bos_token : Optional[int]="<s>" , eos_token : List[Any]="</s>" , sep_token : Any="</s>" , cls_token : Dict="<s>" , unk_token : Optional[Any]="<unk>" , pad_token : int="<pad>" , mask_token : str="<mask>" , add_prefix_space : Tuple=False , trim_offsets : str=True , **kwargs : str , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self : Dict ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self : Optional[Any] , value : Optional[int] ) -> Dict:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self : Optional[Any] , *args : Any , **kwargs : Any ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self : Union[str, Any] , *args : Dict , **kwargs : Optional[Any] ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> Any:
        '''simple docstring'''
        return token_ids_0 + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : "Conversation" ) -> List[int]:
'''simple docstring'''
_lowercase : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase_ )
_lowercase : Union[str, Any] = ' '.join(UpperCamelCase_ )
_lowercase : Dict = self.encode(UpperCamelCase_ )
if len(UpperCamelCase_ ) > self.model_max_length:
_lowercase : int = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4 |
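# Minimal illustration of the Blenderbot input building above: a single
# sequence just gets EOS appended, and token_type_ids are all zeros (ids below
# are illustrative, not from the real vocab).
eos_token_id = 2
token_ids = [10, 11, 12]
input_ids = token_ids + [eos_token_id]          # build_inputs_with_special_tokens
token_type_ids = [0] * (len(token_ids) + 2)     # cls + ids + sep, per the method above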
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ) -> Tuple:
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class DeeBertEncoder( nn.Module ):
'''simple docstring'''
    def __init__( self : Optional[Any] , config : List[str] ) -> None:
        '''simple docstring'''
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self : Tuple , x : str ):
        '''simple docstring'''
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self : List[Any] , pooler : List[Any] ):
        '''simple docstring'''
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self : int , hidden_states : Tuple , attention_mask : List[Any]=None , head_mask : Optional[int]=None , encoder_hidden_states : Union[str, Any]=None , encoder_attention_mask : Optional[Any]=None , ):
        '''simple docstring'''
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
    def __init__( self : int , config : Optional[int] ) -> None:
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
    def set_input_embeddings( self : Dict , value : Dict ):
        '''simple docstring'''
        self.embeddings.word_embeddings = value
    def _prune_heads( self : Optional[int] , heads_to_prune : int ):
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self : Tuple , input_ids : Optional[Any]=None , attention_mask : str=None , token_type_ids : Optional[Any]=None , position_ids : Tuple=None , head_mask : Any=None , inputs_embeds : Optional[int]=None , encoder_hidden_states : int=None , encoder_attention_mask : Tuple=None , ):
        '''simple docstring'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds' )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    '''simple docstring'''
    def __init__( self : Dict , message : List[str] , exit_layer : Dict ) -> None:
        '''simple docstring'''
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway( nn.Module ):
    '''simple docstring'''
    def __init__( self : int , config : List[str] ) -> None:
        '''simple docstring'''
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )
    def forward( self : Optional[Any] , encoder_outputs : Optional[int] ):
        '''simple docstring'''
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
'''simple docstring'''
    def __init__( self : int , config : List[Any] ) -> None:
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self : Dict , input_ids : Dict=None , attention_mask : Union[str, Any]=None , token_type_ids : List[Any]=None , position_ids : Tuple=None , head_mask : Any=None , inputs_embeds : Optional[int]=None , labels : Optional[int]=None , output_layer : str=-1 , train_highway : Union[str, Any]=False , ):
        '''simple docstring'''
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 1 |
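# The record above implements DeeBERT-style early exiting: each transformer layer
# owns a small "highway" classifier, and at inference time the model can stop at
# the first layer whose prediction entropy is low enough. A minimal, self-contained
# sketch of that exit rule (names and threshold handling here are illustrative,
# not the repo's API):
import torch

def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution, one value per example
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

def early_exit(layer_logits: list, threshold: float):
    # return logits from the first layer confident enough to stop, plus its index
    for exit_layer, logits in enumerate(layer_logits, start=1):
        if prediction_entropy(logits).mean().item() < threshold:
            return logits, exit_layer
    return layer_logits[-1], len(layer_logits)  # fall through to the final layer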
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ =["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 616 |
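# The record above is transformers' lazy-import layout: _import_structure maps each
# submodule to the names it exports, and _LazyModule defers the actual imports until
# an attribute is first accessed. A stripped-down sketch of the same mechanism using
# only the stdlib (illustrative; not the real _LazyModule):
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert the structure: exported symbol -> defining submodule
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        return getattr(submodule, attr)  # the heavy backend loads only now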
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
a : Any = IFPipeline
a : str = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
a : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE_ ( self : Any , A_ : int , A_ : Dict=0 ):
'''simple docstring'''
if str(A_ ).startswith("""mps""" ):
__lowercase = torch.manual_seed(A_ )
else:
__lowercase = torch.Generator(device=A_ ).manual_seed(A_ )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
__lowercase = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=A_ , tokenizer=A_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
__lowercase , __lowercase = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__lowercase = None
__lowercase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__lowercase = IFImgaImgPipeline(**pipe_a.components )
__lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__lowercase = IFInpaintingPipeline(**pipe_a.components )
__lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A_ , A_ , A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Any , A_ : int , A_ : str , A_ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : Any ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : List[Any] , A_ : str , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def _start_torch_memory_measurement():
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 616 | 1 |
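# Each stage in the tests above is bracketed the same way: reset CUDA statistics,
# run one pipeline, then assert on torch.cuda.max_memory_allocated(). The same
# bookkeeping as a reusable context manager (a sketch; the test file above uses a
# plain module-level helper instead):
import contextlib
import torch

@contextlib.contextmanager
def track_peak_cuda_memory():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    stats = {}
    try:
        yield stats
    finally:
        stats["peak_bytes"] = torch.cuda.max_memory_allocated()

# usage (inside a @require_torch_gpu test):
#     with track_peak_cuda_memory() as stats:
#         ...run one pipeline stage...
#     assert stats["peak_bytes"] < 13 * 10**9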
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    '''simple docstring'''
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 119 |
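# The solution above slides a 13-character window and only re-scores it when the
# window has to be rebuilt. A brute-force reference that scores every window is a
# useful correctness check, since it is trivially right (illustrative helper, not
# part of the record):
def brute_force(digits: str, width: int = 13) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for d in digits[i : i + width]:
            product *= int(d)
        best = max(best, product)
    return best

# e.g. cross-check the record's answer:
#     print(brute_force(N), solution(N))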
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 119 | 1 |
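# The record above hand-rolls an index-tracked binary heap so that key decreases
# are possible. With the stdlib heapq one can instead push duplicate entries and
# skip stale ones, which is much shorter and handy as a cross-check (edge order
# may differ from prisms_algorithm; this sketch assumes a connected graph whose
# vertices are indexed from 0):
import heapq

def prim_heapq(adjacency_list):
    visited = {0}
    candidates = [(weight, 0, nbr) for nbr, weight in adjacency_list[0]]
    heapq.heapify(candidates)
    tree_edges = []
    while candidates and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(candidates)
        if v in visited:
            continue  # stale entry: v was already reached more cheaply
        visited.add(v)
        tree_edges.append((u, v))
        for nbr, w in adjacency_list[v]:
            if nbr not in visited:
                heapq.heappush(candidates, (w, v, nbr))
    return tree_edges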
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : Union[str, Any] = logging.get_logger(__name__)
__a : Dict = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Union[str, Any] = '''megatron-bert'''
def __init__( self , lowerCAmelCase__=2_90_56 , lowerCAmelCase__=10_24 , lowerCAmelCase__=24 , lowerCAmelCase__=16 , lowerCAmelCase__=40_96 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
        __lowercase = use_cache
| 534 |
def abbr(a: str, b: str) -> bool:
"""simple docstring"""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 534 | 1 |
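# dp[i][j] in the record above means "the first i characters of `a` can produce
# the first j characters of `b`" by capitalizing some lowercase letters of `a`
# and deleting the remaining lowercase ones. Two illustrative checks of the
# recurrence:
assert abbr("daBcd", "ABC") is True    # capitalize 'a' and 'c', delete both 'd's
assert abbr("dBcd", "ABC") is False    # nothing can supply the leading 'A'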
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 544 |
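# Both branches in the record above use the same probe-and-skip idiom: run an
# is_*_available() check, raise OptionalDependencyNotAvailable, and silently skip
# registration when the backend is missing. The probe itself can be done without
# importing the package at all (a sketch with importlib; transformers' real
# checks add version pinning on top of this):
import importlib.util

def is_backend_available(name: str) -> bool:
    # find_spec locates the package without executing it
    return importlib.util.find_spec(name) is not None

# e.g. guard torch-only registrations:
#     if is_backend_available("torch"):
#         _import_structure["modeling_chinese_clip"] = [...]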
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 544 | 1 |
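# A quick audit for init files like the two records above: every symbol listed in
# _import_structure should actually exist in the submodule it points to, otherwise
# the lazy lookup fails only at first attribute access. Illustrative check, not
# part of transformers:
import importlib

def check_import_structure(package: str, import_structure: dict) -> None:
    for submodule, symbols in import_structure.items():
        module = importlib.import_module(f"{package}.{submodule}")
        missing = [s for s in symbols if not hasattr(module, s)]
        assert not missing, f"{package}.{submodule} is missing: {missing}"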