| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module) -> None:
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
[code_codestyle: 121]
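For context, a minimal sketch of how the helpers above might be used together; the toy model below is an illustration, not part of the original snippet:

# Hypothetical usage: pick a device, freeze a toy module's weights, timestamp the run.
model = torch.nn.Linear(4, 2)
device = get_device()
model.to(device)
freeze_module(model)  # no parameter of `model` receives gradients now
print(f"[{get_timestamp()}] trainable params:", sum(p.requires_grad for p in model.parameters()))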
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursive (exponential-time) solution; tracks the best square seen so far."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memoization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Top-down solution with a memoization table to avoid recomputing subproblems."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only two rows instead of the full table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy so the row above reads this row's finished values rather than an alias.
        next_row = current_row.copy()
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
[style_context_codestyle: 134 | label: 0]
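A quick sanity check, not in the original, that the four variants above agree on a small input:

# Hypothetical check: all four implementations should return the same side length.
mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
for fn in (
    largest_square_area_in_matrix_top_down,
    largest_square_area_in_matrix_top_down_with_memoization,
    largest_square_area_in_matrix_bottom_up,
    largest_square_area_in_matrix_bottom_up_space_optimization,
):
    assert fn(3, 3, mat) == 2  # the largest all-ones square has side length 2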
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place using insertion sort with a binary search for the insert position."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the position where `val` belongs in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift everything from the insert position rightwards by one slot.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
[code_codestyle: 360]
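A short illustrative call, not part of the original file. Note that the `else: low = mid + 1` branch inserts equal elements after existing ones, which keeps the sort stable:

# Hypothetical example call.
print(binary_insertion_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]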
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
[style_context_codestyle: 278 | label: 0]
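As a sketch of what this init pattern buys, assuming a transformers release that ships the MRA model; the import below is illustrative:

# Hypothetical illustration: with the _LazyModule pattern above, the heavy
# submodule import happens only when a name is actually accessed.
from transformers.models.mra import MraConfig

config = MraConfig()
print(type(config).__name__)  # "MraConfig", resolved lazily on first access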
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Collect one Spark partition at a time, in the requested order.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs
        )

    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
[code_codestyle: 174]
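This builder is what sits behind `Dataset.from_spark`; a minimal, illustrative call, assuming a local Spark session and a `datasets` release that includes Spark support:

# Hypothetical usage sketch for the builder above.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
ds = Dataset.from_spark(df)  # materializes the DataFrame through this builder
print(ds[0])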
"""simple docstring"""
def __lowerCamelCase ( a_ : int ) -> bool:
if not isinstance(a_ , a_ ):
__SCREAMING_SNAKE_CASE :int = f'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 0:
return False
__SCREAMING_SNAKE_CASE :Any = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 191 | 0 |
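For illustration (not in the original):

# Hypothetical examples: 5*5 = 25 and 76*76 = 5776 end in 5 and 76 respectively.
print(is_automorphic_number(5))   # True
print(is_automorphic_number(76))  # True
print(is_automorphic_number(7))   # False, since 7*7 = 49 does not end in 7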
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
[code_codestyle: 105]

import argparse
import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
[style_context_codestyle: 105 | label: 1]
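A hedged sketch of invoking the converter above directly; the checkpoint path and output directory are placeholders, not real files:

# Hypothetical direct invocation of convert_fairseq_m2m100_checkpoint_from_disk.
model = convert_fairseq_m2m100_checkpoint_from_disk("checkpoints/m2m100/model.pt")
model.save_pretrained("converted/m2m100")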
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __snake_case ( lowerCamelCase__ ):
a__ = """Wav2Vec2FeatureExtractor"""
a__ = """AutoTokenizer"""
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__(lowercase , lowercase)
a__: str = self.feature_extractor
a__: Any = False
@classmethod
def lowerCamelCase_ ( cls , lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
try:
return super().from_pretrained(lowercase , **lowercase)
except OSError:
warnings.warn(
f'Loading a tokenizer inside {cls.__name__} from a config that does not'
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , lowercase , )
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase , **lowercase)
a__: List[Any] = WavaVecaCTCTokenizer.from_pretrained(lowercase , **lowercase)
return cls(feature_extractor=lowercase , tokenizer=lowercase)
def __call__( self , *lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowercase , **lowercase)
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
a__: Union[str, Any] = kwargs.pop('raw_speech')
else:
a__: Dict = kwargs.pop('audio' , lowercase)
a__: List[str] = kwargs.pop('sampling_rate' , lowercase)
a__: List[str] = kwargs.pop('text' , lowercase)
if len(lowercase) > 0:
a__: Tuple = args[0]
a__: Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
a__: List[Any] = self.feature_extractor(lowercase , *lowercase , sampling_rate=lowercase , **lowercase)
if text is not None:
a__: Optional[int] = self.tokenizer(lowercase , **lowercase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a__: int = encodings["input_ids"]
return inputs
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> int:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*lowercase , **lowercase)
a__: Optional[Any] = kwargs.pop('input_features' , lowercase)
a__: Optional[Any] = kwargs.pop('labels' , lowercase)
if len(lowercase) > 0:
a__: Any = args[0]
a__: Union[str, Any] = args[1:]
if input_features is not None:
a__: Optional[int] = self.feature_extractor.pad(lowercase , *lowercase , **lowercase)
if labels is not None:
a__: List[str] = self.tokenizer.pad(lowercase , **lowercase)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
a__: Optional[Any] = labels["input_ids"]
return input_features
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase)
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase)
@contextmanager
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.')
a__: Any = True
a__: Any = self.tokenizer
yield
a__: Dict = self.feature_extractor
a__: int = False
[code_codestyle: 290]
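A minimal usage sketch of the public Wav2Vec2Processor that this file implements; the checkpoint is a real hub ID but still illustrative, and the one-second silent waveform is a stand-in:

# Hypothetical usage of a Wav2Vec2-style processor on a 16 kHz waveform.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in
inputs = processor(audio=speech, sampling_rate=16_000, text="HELLO", return_tensors="pt")
print(inputs["input_values"].shape, inputs["labels"])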
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Stable-Diffusion pipeline that reuses fixed-size reference latents so the same seed
    produces similar images at different output resolutions."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # setting `slice_size` to None disables attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        latents_reference: Optional[torch.FloatTensor] = None,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
[style_context_codestyle: 300 | label: 0]
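A hedged usage sketch: `SeedResizeStableDiffusionPipeline` is this editor's readable rename of the obfuscated class above, and the checkpoint and prompts are illustrative only:

# Hypothetical usage: the same seed at two sizes should give similar compositions.
import torch

pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
small = pipe("a red barn at sunset", height=512, width=512, generator=generator).images[0]
generator = torch.Generator("cuda").manual_seed(0)
wide = pipe("a red barn at sunset", height=512, width=768, generator=generator).images[0]
# Both runs share the same 64x64 reference latents, which is what keeps them similar.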
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A Krishnamurthy number equals the sum of the factorials of its digits (e.g. 145)."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
[code_codestyle: 355]
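For illustration (not in the original):

# Hypothetical checks: 145 = 1! + 4! + 5! = 1 + 24 + 120, so it qualifies.
print(krishnamurthy(145))  # True
print(krishnamurthy(144))  # False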
import unittest

from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Any = MobileBertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
__lowerCAmelCase : List[Any] = model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
__lowerCAmelCase : Tuple = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = MobileBertForMaskedLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : List[Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = MobileBertForNextSentencePrediction(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : List[Any] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = MobileBertForPreTraining(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Optional[int] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , next_sentence_label=lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : int = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.num_labels
__lowerCAmelCase : int = MobileBertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Optional[int] = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Dict = MobileBertForTokenClassification(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Tuple = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.num_choices
__lowerCAmelCase : List[Any] = MobileBertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Optional[int] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str =(
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[int] =(
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : Union[str, Any] =True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # MODEL_FOR_PRETRAINING_MAPPING is assumed to be imported earlier in this module.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        # MobileBertConfig is assumed to be imported earlier in this module.
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
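# A minimal illustration (not part of the original tests) of why the check above uses
# a ratio instead of an absolute difference: with values around 1e8, even a tiny
# relative error gives a large absolute difference. The helper name below is ours,
# and `torch` is assumed to be imported earlier in this module.
def _ratio_within_tolerance(expected, actual, tolerance=1e-3):
    # Values spanning many orders of magnitude are compared via expected / actual ~= 1.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))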
| 139 | 0 |
"""Convert a decimal number to a simplified fraction."""


def decimal_to_fraction(decimal) -> tuple[int, int]:
    """Return (numerator, denominator) for the given decimal, reduced with Euclid's algorithm."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
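# An equivalent reduction using the standard library's math.gcd; the function name
# below is ours (a hedged alternative sketch, not part of the original module).
from math import gcd


def decimal_to_fraction_gcd(decimal) -> tuple[int, int]:
    decimal = float(decimal)
    number_of_frac_digits = len(str(decimal).split(".")[1])
    numerator = int(decimal * 10**number_of_frac_digits)
    denominator = 10**number_of_frac_digits
    divisor = gcd(numerator, denominator)  # replaces the hand-rolled Euclidean loop above
    return numerator // divisor, denominator // divisor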
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction('67') = }''')
print(f'''{decimal_to_fraction('45.0') = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction('6.25') = }''')
print(f'''{decimal_to_fraction('78td') = }''')
| 28 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split the input on any character that is not alphanumeric or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
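# A minimal usage sketch (the example strings below are ours, not from the original):
assert to_pascal_case("hello world") == "HelloWorld"
assert to_camel_case("hello world") == "helloWorld"
assert to_snake_case("hello world", upper=False) == "hello_world"
assert to_kebab_case("hello world", upper=True) == "HELLO-WORLD"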
if __name__ == "__main__":
__import__('doctest').testmod()
| 266 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 146 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of workers to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
    os.makedirs('temp_engine')

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

    # serialize_engine and store in file (can be directly loaded and deserialized):
    with open(engine_name, 'wb') as f:
        f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
        F' model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, so we strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []

    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]

    return tokenized_examples
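# Illustrative sketch (made-up numbers) of the overflow behaviour relied on above:
# with return_overflowing_tokens=True one long (question, context) pair can yield
# several features, and overflow_to_sample_mapping points each feature back to its
# source example, e.g. [0, 0, 1] for two features from example 0 and one from example 1:
#
#   enc = tokenizer("short question?", "a very long context ...",
#                   truncation="only_second", max_length=128, stride=args.doc_stride,
#                   return_overflowing_tokens=True, return_offsets_mapping=True)
#   len(enc["input_ids"])  # > 1 when the context does not fit in max_length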
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]

    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')

# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(F'  Num examples = {len(eval_dataset)}')
    logger.info(F'  Batch size = {args.per_device_eval_batch_size}')

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info('  Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 146 | 1 |
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == ' ':
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {'A', 'B', ' '} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += ' '
    return decoded.strip()
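# A quick round-trip sketch (our own example, not part of the original module):
assert encode('hello') == 'AABBBAABAAABABAABABAABBAB'
assert decode(encode('hello')) == 'hello'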
if __name__ == "__main__":
from doctest import testmod
testmod()
| 34 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
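# A hedged usage sketch (model name, file path and labels below are illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "music", "speech"])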
| 134 | 0 |
"""simple docstring"""
def solution() -> int:
    """Return a * b * c for the Pythagorean triplet (a, b, c) with a + b + c = 1000."""
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
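# Sanity check (illustrative, not part of the original solution): the unique triplet
# with a + b + c = 1000 is (200, 375, 425), whose product is 31875000.
assert 200 + 375 + 425 == 1_000
assert 200**2 + 375**2 == 425**2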
| 362 | from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n    >>> repo = "openai/shap-e-img2img"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n    >>> image = load_image(image_url).convert("RGB")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n    ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E pipelines."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating latent representations of a 3D asset from an image, using Shap-E."""
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 81 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    """Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False) -> int:
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 6 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoBERTa model."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 179 | 0 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` as a string."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
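    # For example, multiplication_table(number=5, number_of_terms=3) returns:
    # 5 * 1 = 5
    # 5 * 2 = 10
    # 5 * 3 = 15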
| 81 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
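# Typical invocation (illustrative script name and args, not prescribed by this file):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# The launched script must define an `_mp_fn(index)` entry point for xmp.spawn to call.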
| 81 | 1 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 51 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Iterate through the list from both ends to find the index of `key`; return -1 if absent."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
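# A minimal usage sketch (the example list below is ours, not from the original):
#   search([1, 3, 5, 7, 9], 7)  -> 3
#   search([1, 3, 5, 7, 9], 4)  -> -1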
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 | 1 |
'''simple docstring'''
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 354 |
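# A hedged usage sketch (the audio path is a placeholder; the tool loads its
# processor and model on first use):
#   tool = SpeechToTextTool()
#   transcript = tool("path/to/audio.flac")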
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count n-digit positive integers that are also an nth power (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""") | 283 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )
    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
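# A hedged usage sketch (checkpoint name and placeholder token are illustrative):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True).input_ids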
| 59 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCAmelCase = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510", )
| 46 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 167 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
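# Minimal usage sketch, hand-checked against the table above:
#   energy_conversion("joule", "kilojoule", 1_000)  ->  1.0
#   energy_conversion("watthour", "joule", 1)       ->  3_600.0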
if __name__ == "__main__":
import doctest
doctest.testmod()
| 167 | 1 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
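# Example: create_dummy_object("DDIMScheduler", '["torch"]') renders the DUMMY_CLASS
# template above; an all-lowercase name renders DUMMY_FUNCTION, and an all-uppercase
# name renders DUMMY_CONSTANT.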
def create_dummy_files(backend_specific_objects=None):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects.")
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this.")
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__lowerCAmelCase = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 341 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
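# Minimal usage sketch (assumes an active SparkSession named `spark`; the DataFrame
# contents below are illustrative only):
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()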
| 337 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
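# Sanity check by hand: with eval_pred = (np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])),
# argmax over axis 1 gives [1, 0], matching the references, so this returns {"accuracy": 1.0}.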
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        })

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )

    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 337 | 1 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
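# Example: dep_version_check("tqdm") re-validates the pinned tqdm requirement at runtime
# and raises if the installed version falls outside the range recorded in `deps`.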
| 35 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
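# Worked example (hand-checked): value=[60, 100, 120], weight=[10, 20, 30], capacity=50
# takes items 0 and 1 whole plus 2/3 of item 2, so fractional_knapsack returns
# max_value == 240.0 and fractions == [1, 1, 2/3].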
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
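# This is Project Euler problem 48: the last ten digits of 1**1 + 2**2 + ... + 1000**1000.
# The well-known published answer is "9110846700", which solution() returns.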
if __name__ == "__main__":
print(solution())
| 158 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 343 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 155 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 155 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self) -> float:
return 1E-3
| 256 | """simple docstring"""
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
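# Worked example (hand-checked): find_pattern("abr", "abracadabra") == 2, since the
# Z-array of "abr" + "abracadabra" reaches len("abr") exactly at the two occurrences.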
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 360 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
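# Example: get_pairs("low") == {("l", "o"), ("o", "w")}, the set of adjacent symbol
# bigrams over which BPE merge ranks are looked up.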
# Speech2Text2 has no max input length
snake_case_ = {'''facebook/s2t-wav2vec2-large-en-de''': 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
@property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 216 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act), *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)], )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states, )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
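

# Illustrative usage sketch (an editorial addition, not part of the original file).
# It assumes the public "microsoft/resnet-50" checkpoint and an arbitrary PIL image:
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])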
| 119 |
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale ("L" mode) PIL image object.
    Binarizes the image: pixels above the global mean become 255, the rest 0.
    """
    width, height = image.size
    pixels = image.load()
    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
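# A vectorized NumPy alternative for large images (an editorial sketch, assuming
# NumPy is available; not part of the original file):
#
#   import numpy as np
#   arr = np.asarray(image, dtype=np.uint8)
#   binary = np.where(arr > arr.mean(), 255, 0).astype(np.uint8)
#   image = Image.fromarray(binary)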
| 36 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
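# Quick manual run (an editorial sketch; it reuses the module-level PRED/TGT
# fixtures and the local `utils.calculate_rouge` helper):
#
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rougeLsum"])
#   print(scores)  # aggregated F-measures keyed by metric name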
| 351 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
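# Typical combined call outside the tests (an editorial sketch; the model id is
# the same public checkpoint the tests use, and shapes are illustrative):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=floats_list((1, 1000)), return_tensors="np")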
| 327 | 0 |
def split(string: str, separator: str = " ") -> list:
    """
    Split the string into all the substrings separated by the separator.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
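# Note: unlike str.split, this implementation drops a trailing empty field,
# e.g. split("a,b,", separator=",") returns ["a", "b"] rather than ["a", "b", ""].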
| 149 |
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
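# Quick usage sketch (an editorial addition, not part of the original file):
#
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)
#   print(linked_list)            # 1 2 3
#   assert 2 in linked_list
#   linked_list.delete_value(2)
#   assert 2 not in linked_list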
| 149 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 360 |
def solution(limit: int = 1000000) -> int:
    """
    Sieve Euler's totient phi(n) for all n <= limit, then sum phi(2..limit).
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
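# Illustrative check: solution(8) sums phi(2)..phi(8) = 1+2+2+4+2+6+4 = 21,
# i.e. the number of reduced proper fractions with denominator <= 8.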
| 165 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[type] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """
    Strips the filesystem protocol from a path, e.g. "s3://bucket/dataset" -> "bucket/dataset".
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """
    Checks whether `fs` is a remote filesystem (anything other than the local "file" protocol).
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear fsspec's event-loop references so a forked child process can create its own loop.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
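# Usage sketch (an editorial addition, not part of the original file):
#
#   import fsspec
#   fs = fsspec.filesystem("file")
#   assert not is_remote_filesystem(fs)
#   assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"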
| 279 |
def solution(limit: int = 28123) -> int:
    """
    Sum of all positive integers <= limit that cannot be written as the sum of
    two abundant numbers (Project Euler problem 23 uses limit = 28123).
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
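# Sanity note: 24 = 12 + 12 is the smallest sum of two abundant numbers, so
# every n < 24 necessarily contributes to the result.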
| 141 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None`, then frees device caches.
    The returned list should be reassigned to the original variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """
    Checks whether `exception` is a CUDA/CPU out-of-memory or cuDNN-support error.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    Decorator that retries `function` with halved batch sizes whenever an
    out-of-memory error is raised, starting from `starting_batch_size`.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
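# Usage sketch (an editorial addition; `train` is a made-up example function):
#
#   @find_executable_batch_size(starting_batch_size=64)
#   def train(batch_size):
#       ...  # raise a CUDA OOM RuntimeError here if batch_size is too large
#
#   train()  # retries with 64, 32, 16, ... until the body succeeds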
| 351 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 207 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = 'https://openaipublic.azureedge.net/jukebox/models/'
UpperCamelCase__ = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
UpperCamelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 65 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Compute the common substring of the prefix of the node and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node, mark the node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 39 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__lowerCAmelCase : int =logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 32 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 32 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes that yields primes indefinitely."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its smallest prime factor
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: schedule its square and yield it
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
if __name__ == "__main__":
print(solution())
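# Background for the skipped primes: for odd n the remainder of
# (p_n - 1)**n + (p_n + 1)**n modulo p_n**2 is 2 * n * p_n (binomial expansion),
# while for even n it is just 2, so only odd n need to be checked (Project Euler 123).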
| 32 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError if the profile JSON cannot be found.
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """
    Class to interact with a (public) Instagram user profile.
    """

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """
    A self running doctest
    >>> test_instagram_user()
    """
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
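For an offline sense of what `extract_user_profile` expects, here is a sketch on a synthetic `<script>` payload (the JSON below is invented for illustration and far smaller than a real Instagram page):

from bs4 import BeautifulSoup

fake_html = (
    '<script>window._sharedData = {"config": {}, "entry_data": '
    '{"ProfilePage": [{"graphql": {"user": {"username": "demo"}}}]}};</script>'
)
script = BeautifulSoup(fake_html, "html.parser").find("script")
# The function slices from the first '{"config"' to the trailing ';' and parses it.
assert extract_user_profile(script)["username"] == "demo"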
| 32 | 1 |
"""Circular queue: a fixed-capacity FIFO backed by a ring buffer."""


class CircularQueue:
    """Circular FIFO queue with a fixed capacity of `n` elements."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it (False if empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
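A short usage sketch (not part of the original file) exercising the wrap-around behaviour:

queue = CircularQueue(3)
queue.enqueue("a").enqueue("b").enqueue("c")   # rear wraps to index 0
assert len(queue) == 3 and queue.first() == "a"
assert queue.dequeue() == "a"                  # frees a slot at the front
queue.enqueue("d")                             # stored in the freed slot
assert [queue.dequeue() for _ in range(3)] == ["b", "c", "d"]
assert queue.is_empty()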
| 345 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
# NOTE: the model-specific class name was lost in the scrambled dump, so a
# generic name is used here.
class SimpleImageProcessor(BaseImageProcessor):
    r"""Image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
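A minimal usage sketch, assuming the reconstructed class name above (`SimpleImageProcessor` is a stand-in, since the original model-specific name is not recoverable from this dump):

import numpy as np

processor = SimpleImageProcessor()
dummy = (np.random.rand(3, 300, 400) * 255).astype(np.uint8)  # CHW uint8 image
batch = processor(images=dummy, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): 256 shortest-edge resize, then 224x224 crop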
| 345 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a DetrConfig from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
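A small sketch of how `attribute_map` aliases generic names onto the DETR-specific ones, and what the ONNX config exports:

config = DetrConfig()
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8

onnx_config = DetrOnnxConfig(config)
print(list(onnx_config.inputs))  # ['pixel_values', 'pixel_mask']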
| 276 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
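Typical wiring in a model test, sketched for illustration (the test class name and the `hidden_size=37` override are the usual pattern, not taken from this file):

import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def test_config(self):
        # ConfigTester drives all the common checks against the given config class.
        tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
        tester.run_common_tests()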
| 158 | 0 |
class EditDistance:
    """Minimum edit (Levenshtein) distance, top-down and bottom-up DP."""

    def __init__(self):
        self.word_a = ""
        self.word_b = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word_a[m] == self.word_b[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word_a: str, word_b: str) -> int:
        self.word_a = word_a
        self.word_b = word_b
        self.dp = [[-1 for _ in range(len(word_b))] for _ in range(len(word_a))]

        return self.__min_dist_top_down_dp(len(word_a) - 1, len(word_b) - 1)

    def min_dist_bottom_up(self, word_a: str, word_b: str) -> int:
        self.word_a = word_a
        self.word_b = word_b
        m = len(word_a)
        n = len(word_b)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word_a[i - 1] == word_b[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
lowerCAmelCase__ = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
lowerCAmelCase__ = input('''Enter the first string: ''').strip()
lowerCAmelCase__ = input('''Enter the second string: ''').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
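A quick non-interactive check of both implementations on the classic example (turning "kitten" into "sitting" takes 3 edits):

solver = EditDistance()
assert solver.min_dist_top_down("kitten", "sitting") == 3
assert solver.min_dist_bottom_up("kitten", "sitting") == 3
assert solver.min_dist_bottom_up("", "abc") == 3  # pure insertions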
| 121 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
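A tiny illustration (not in the original script) of the mapping these helpers emit for one resnet key:

sample = ["input_blocks.1.0.in_layers.0.weight"]
print(renew_resnet_paths(sample))
# [{'old': 'input_blocks.1.0.in_layers.0.weight', 'new': 'input_blocks.1.0.norm1.weight'}]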
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """
    Takes locally converted paths, applies the global renaming, splits attention
    layers, and assigns the weights to the new checkpoint.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes a state dict and a config, and returns a converted checkpoint.
    (Target key names were stripped by the scramble; they are reconstructed here
    from the standard diffusers UNet naming scheme.)
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )

    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 121 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Compare the per-example score (summed negative log-likelihood) against
        # the value reported by the original T5 codebase.
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
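For context, `mtf_score` converts the mean cross-entropy the model returns into a summed negative log-likelihood, which is what the original T5 evaluation reports. A sketch of the same arithmetic on made-up numbers:

mean_ce = 16.98254          # illustrative mean cross-entropy over the target tokens
num_target_tokens = 5       # illustrative target length
summed_nll = -(num_target_tokens * mean_ce)
print(summed_nll)           # -84.9127, the kind of value compared above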
| 26 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 189 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    lavis_model, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__UpperCAmelCase :Dict = argparse.ArgumentParser()
__UpperCAmelCase :str = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__UpperCAmelCase :Union[str, Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 240 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :List[Any] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 240 | 1 |
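# The pattern above only registers import targets; the heavy imports happen
# lazily on attribute access. A simplified sketch of the idea (not the actual
# transformers `_LazyModule` implementation):
#   class LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._class_to_module[name]  # e.g. "modeling_lxmert"
#           module = importlib.import_module("." + submodule, self.__name__)
#           return getattr(module, name)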
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    # Shrink each side to the nearest multiple of 32, scale to [0, 1], move
    # channels first, and map to [-1, 1] as a (1, 3, H, W) float tensor.
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion pipeline for image super-resolution."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image=None, batch_size=1, num_inference_steps=100, eta=0.0, generator=None, output_type="pil", return_dict=True) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
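# Minimal usage sketch (the model id is assumed to be the public LDM 4x
# upscaler checkpoint; substitute your own):
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]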
| 11 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self) -> None:
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    """Smoke-tests each Flax example script end to end on tiny fixtures."""
    def test_run_glue(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
    def test_run_clm(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
    def test_run_summarization(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
    def test_run_mlm(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
    def test_run_t5_mlm(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
    def test_run_ner(self) -> None:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
    def test_run_qa(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 15 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Constructs a Reformer tokenizer backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 74 |
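# Minimal usage sketch for the tokenizer above (checkpoint name taken from the
# pretrained map; behavior is a round-trip through SentencePiece):
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tok("A quick test.").input_ids
#   text = tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids))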
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16,
        num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11],
        readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1], backbone_config=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.")
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 74 | 1 |
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    # A subtree of even size can be separated by cutting the edge above its
    # root; count those subtrees with one DFS from node 1.
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9  # number of nodes, number of edges
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
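# Worked check for the hard-coded tree above: the subtrees rooted at nodes 3
# and 6 have even sizes (2 and 4), and the whole tree (size 10) is counted as
# well, so `cuts` has three entries and the program prints len(cuts) - 1 == 2.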
| 18 | '''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
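# Each *_command_parser above registers a subcommand and attaches its handler,
# which is why main() can dispatch on args.func. Minimal sketch of that
# registration pattern (names hypothetical):
#   def test_command_parser(subparsers):
#       p = subparsers.add_parser("test")
#       p.set_defaults(func=run_test_command)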
| 18 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
        image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6,
        attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02,
        layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute",
        cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
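# Minimal sketch: composing the three sub-configs above into a full model
# config (all defaults; an OPT text config is assumed for illustration).
#   cfg = InstructBlipConfig.from_vision_qformer_text_configs(
#       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), CONFIG_MAPPING["opt"]())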
| 43 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 74 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """
    Calculate the turn-around time of each process under HRRN scheduling: the
    ready process with the highest response ratio, (wait + burst) / burst,
    runs next.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate the waiting time of each process: turn-around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""") | 353 |
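# Quick check of the response-ratio rule used above, ratio = (W + B) / B with
# W the time waited and B the burst time: a job that has waited 6 units with
# burst 4 scores (6 + 4) / 4 = 2.5 and is picked over a job that has waited
# 2 units with the same burst, which scores (2 + 4) / 4 = 1.5.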
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number') | 235 | 0 |
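# Worked example for the loop above: a 5x5 spiral has diagonal values
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, so solution(5) == 101
# (ring i adds 4 * (2i + 1)**2 - 6 * 2i to the running total).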
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_0020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_0026, 25_0001] )
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs), {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            }, )
| 275 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
@slow
@require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 275 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ):
    '''simple docstring'''
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F'''.{module_name}''' , """transformers.models""" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , """__name__""" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("""transformers""" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , **kwargs , ):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            """Could not locate the image processor configuration file, will try to use the model config instead.""" )
        return {}
    with open(resolved_config_file , encoding="""utf-8""" ) as reader:
        return json.load(reader )
class AutoImageProcessor:
def __init__( self ) -> Tuple:
"""simple docstring"""
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        config = kwargs.pop("""config""" , None )
        trust_remote_code = kwargs.pop("""trust_remote_code""" , None )
        kwargs["""_from_auto"""] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get("""image_processor_type""" , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
            image_processor_auto_map = config_dict["""auto_map"""]["""AutoImageProcessor"""]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("""feature_extractor_type""" , None )
            if feature_extractor_class is not None:
                logger.warning(
                    """Could not find image processor class in the image processor config or the model config. Loading"""
                    """ based on pattern matching with the model's feature extractor configuration.""" )
                image_processor_class = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
            if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
                feature_extractor_auto_map = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
                image_processor_auto_map = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
                logger.warning(
                    """Could not find image processor auto map in the image processor config or the model config."""
                    """ Loading based on pattern matching with the model's feature extractor configuration.""" )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , """image_processor_type""" , None )
            if hasattr(config , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["""AutoImageProcessor"""]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop("""code_revision""" , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register( config_class , image_processor_class ):
        """simple docstring"""
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
| 248 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum( number ):
    '''simple docstring'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution():
    '''simple docstring'''
    return sum(
        number
        for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
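# Hand check (illustrative): 4150 is one of the numbers solution() counts, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150, i.e.
# digits_fifth_powers_sum(4150) == 4150.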
| 248 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args : Namespace ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
UpperCAmelCase__ = "\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        train_parser = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=__UpperCAmelCase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type : str , tf_checkpoint : str , pytorch_dump_output : str , config : str , finetuning_task_name : str , *args , ):
        """simple docstring"""
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(F"""Loading model {model_type}""" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ''''''
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ''''''
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint , self._config , self._pytorch_dump_output , tf_dataset_file )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
| 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = ShapEPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''']
    required_optional_params = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
    def text_embedder_hidden_size( self ):
"""simple docstring"""
return 32
@property
    def time_input_dim( self ):
"""simple docstring"""
return 32
@property
    def time_embed_dim( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def renderer_dim( self ):
"""simple docstring"""
return 8
@property
    def dummy_tokenizer( self ):
        """simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs )
return model
@property
    def dummy_renderer( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
return model
    def get_dummy_components( self ):
        """simple docstring"""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def __lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = 1
a = 2
a = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
a = ShapEPipeline.from_pretrained('''openai/shap-e''' )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
a = pipe(
'''a shark''' , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 0 | 1 |
def print_max_activities( start : list[int] , finish : list[int] ) -> None:
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
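    # Hand-traced expectation for the demo data above (illustrative, not captured
    # program output): activity 0 (finish 2) is taken first, then 1 (start 3 >= 2),
    # 3 (start 5 >= 4) and 4 (start 8 >= 7), so the script prints: 0,1,3,4,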
| 355 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__( self , data ):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q : queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def in_order( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def post_order( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def level_order( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q : queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q : queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
            q.put(node )
def pre_order_iter( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack : list[TreeNode] = []
    n = node
    while n or stack:
        while n: # start from root node, find its left child
            print(n.data , end=''',''' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack : list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=''',''' )
        n = n.right
def post_order_iter( node : TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1 , stack2 = [], []
    n = node
    stack1.append(n )
    while stack1: # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2: # pop up from stack2 will be the post order
        print(stack2.pop().data , end=''',''' )
def prompt( s : str = "" , width : int = 50 , char : str = "*" ) -> str:
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 99 | 0 |
"""simple docstring"""
from __future__ import annotations
def p_series( nth_term : int | float | str , power : int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(nth_term ):
        series.append(f'''1 / {pow(temp + 1 , power )}''' if series else """1""" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
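    # Worked example (illustrative): p_series(5, 2) returns the first five terms of
    # the P-series with power 2: ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].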
| 155 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict( checkpoint_path ):
    '''simple docstring'''
    sd = torch.load(checkpoint_path , map_location="""cpu""" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    '''simple docstring'''
    new_d = OrderedDict()
    new_d["""visual_bert.embeddings.position_ids"""] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["""cls.predictions.decoder.bias"""] = new_d["""cls.predictions.bias"""]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
lowerCAmelCase = """pretraining"""
if "vcr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 512}
lowerCAmelCase = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048}
lowerCAmelCase = """vqa_advanced"""
elif "vqa" in checkpoint_path:
lowerCAmelCase = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
lowerCAmelCase = """vqa"""
elif "nlvr" in checkpoint_path:
lowerCAmelCase = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
lowerCAmelCase = """nlvr"""
lowerCAmelCase = VisualBertConfig(**snake_case__ )
# Load State Dict
lowerCAmelCase = load_state_dict(snake_case__ )
lowerCAmelCase = get_new_dict(snake_case__ , snake_case__ )
if model_type == "pretraining":
lowerCAmelCase = VisualBertForPreTraining(snake_case__ )
elif model_type == "vqa":
lowerCAmelCase = VisualBertForQuestionAnswering(snake_case__ )
elif model_type == "nlvr":
lowerCAmelCase = VisualBertForVisualReasoning(snake_case__ )
elif model_type == "multichoice":
lowerCAmelCase = VisualBertForMultipleChoice(snake_case__ )
model.load_state_dict(snake_case__ )
# Save Checkpoints
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 155 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
__lowerCAmelCase = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__lowerCAmelCase = '''allenai'''
def rewrite_dict_keys( d ):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k] # restore
    return da
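# Illustrative trace (the four special tokens must be present, since their '</w>'
# variants are deleted unconditionally):
#   rewrite_dict_keys({'le@@': 5, 'tt@@': 6, 'er': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3})
#   -> {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}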
def convert_fsmt_checkpoint_to_pytorch( fsmt_checkpoint_path , pytorch_dump_folder_path ):
    # prep
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )
lowercase__: Optional[int] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase__: List[Any] = cls.hub_models()
lowercase__: Union[str, Any] = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowercase__: Optional[Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'using checkpoint {checkpoint_file}' )
lowercase__: str = hub_utils.from_pretrained(
snake_case , snake_case , snake_case , archive_map=snake_case , **snake_case )
lowercase__: int = vars(chkpt['args']['model'] )
lowercase__: Optional[Any] = args['source_lang']
lowercase__: Dict = args['target_lang']
lowercase__: List[str] = dirname(snake_case )
lowercase__: Any = basename(snake_case )
# dicts
lowercase__: int = os.path.join(snake_case , f'dict.{src_lang}.txt' )
lowercase__: str = os.path.join(snake_case , f'dict.{tgt_lang}.txt' )
lowercase__: List[str] = Dictionary.load(snake_case )
lowercase__: Optional[int] = rewrite_dict_keys(src_dict.indices )
lowercase__: Optional[int] = len(snake_case )
lowercase__: Union[str, Any] = os.path.join(snake_case , 'vocab-src.json' )
print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase__: Optional[Any] = True
for k in src_vocab.keys():
if not k.islower():
lowercase__: Tuple = False
break
lowercase__: List[Any] = Dictionary.load(snake_case )
lowercase__: Tuple = rewrite_dict_keys(tgt_dict.indices )
lowercase__: Dict = len(snake_case )
lowercase__: Optional[int] = os.path.join(snake_case , 'vocab-tgt.json' )
print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# merges_file (bpecodes)
lowercase__: Union[str, Any] = os.path.join(snake_case , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase__: Tuple = os.path.join(snake_case , snake_case )
if os.path.exists(snake_case ):
break
with open(snake_case , encoding='utf-8' ) as fin:
lowercase__: Optional[int] = fin.read()
lowercase__: List[str] = re.sub(R' \d+$' , '' , snake_case , 0 , re.M ) # remove frequency number
print(f'Generating {merges_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as fout:
fout.write(snake_case )
# model config
lowercase__: Any = os.path.join(snake_case , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'need to extend tokenizer to support bpe={args["bpe"]}'
    assert args["tokenizer"] == "moses", f'need to extend tokenizer to support tokenizer={args["tokenizer"]}'
lowercase__: Optional[Any] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.0_2,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowercase__: Union[str, Any] = 5
lowercase__: List[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowercase__: Tuple = best_score_hparams[model_dir]['length_penalty']
else:
lowercase__: Union[str, Any] = 1.0
print(f'Generating {fsmt_model_config_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# tokenizer config
lowercase__: Optional[int] = os.path.join(snake_case , snake_case )
lowercase__: Any = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f'Generating {fsmt_tokenizer_config_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# model
lowercase__: int = chkpt['models'][0]
lowercase__: Optional[int] = model.state_dict()
# rename keys to start with 'model.'
lowercase__: List[str] = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowercase__: int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(snake_case , snake_case )
lowercase__: Dict = FSMTConfig.from_pretrained(snake_case )
lowercase__: Union[str, Any] = FSMTForConditionalGeneration(snake_case )
# check that it loads ok
model_new.load_state_dict(snake_case , strict=snake_case )
# save
lowercase__: Tuple = os.path.join(snake_case , snake_case )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(snake_case , snake_case )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'cd {data_root}' )
print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 288 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert( model , tf_checkpoint_path , config ):
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase__: Union[str, Any] = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(f'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase__: str = name[1:]
# figure out how many levels deep the name is
lowercase__: Optional[Any] = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(snake_case )
# read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append('/'.join(name ) )
        arrays.append(array )
logger.info(f'Read a total of {len(snake_case ):,} layers' )
# Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth ) )})' )
    layer_depth = list(set(layer_depth ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
    for full_name, array in zip(names , arrays ):
lowercase__: Optional[int] = full_name.split('/' )
lowercase__: List[Any] = model
lowercase__: Any = []
for i, m_name in enumerate(snake_case ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
lowercase__: Any = int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
lowercase__: str = getattr(snake_case , 'embeddings' )
lowercase__: int = getattr(snake_case , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
lowercase__: int = getattr(snake_case , 'encoder' )
lowercase__: List[str] = getattr(snake_case , 'layer' )
lowercase__: Union[str, Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
lowercase__: Tuple = getattr(snake_case , 'pooler' )
lowercase__: Tuple = getattr(snake_case , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
lowercase__: Union[str, Any] = getattr(snake_case , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
lowercase__: Union[str, Any] = getattr(snake_case , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
lowercase__: Dict = getattr(snake_case , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
lowercase__: Optional[Any] = getattr(snake_case , 'token_type_embeddings' )
else:
raise ValueError(f'Unknown embedding layer with name {full_name}' )
trace.append('weight' )
lowercase__: List[str] = getattr(snake_case , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
lowercase__: int = getattr(snake_case , 'attention' )
lowercase__: Union[str, Any] = getattr(snake_case , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
lowercase__: str = getattr(snake_case , 'attention' )
lowercase__: Optional[Any] = getattr(snake_case , 'output' )
lowercase__: List[Any] = getattr(snake_case , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
lowercase__: Optional[Any] = getattr(snake_case , 'attention' )
lowercase__: Optional[int] = getattr(snake_case , 'output' )
lowercase__: Optional[Any] = getattr(snake_case , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
lowercase__: Union[str, Any] = getattr(snake_case , 'output' )
lowercase__: List[Any] = getattr(snake_case , 'dense' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['output', 'LayerNorm'] )
lowercase__: Any = getattr(snake_case , 'output' )
lowercase__: str = getattr(snake_case , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
lowercase__: Tuple = getattr(snake_case , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
lowercase__: List[str] = getattr(snake_case , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
lowercase__: Optional[int] = getattr(snake_case , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
lowercase__: Any = getattr(snake_case , 'intermediate' )
lowercase__: str = getattr(snake_case , 'dense' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('output' )
lowercase__: Union[str, Any] = getattr(snake_case , 'output' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
lowercase__: str = getattr(snake_case , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
lowercase__: Tuple = getattr(snake_case , 'weight' )
else:
logger.warning(f'Ignored {m_name}' )
# for certain layers reshape is necessary
lowercase__: Any = '.'.join(snake_case )
if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , snake_case ) or re.match(
R'(\S+)\.attention\.output\.dense\.weight' , snake_case ):
lowercase__: Union[str, Any] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase__: str = array.transpose()
if pointer.shape == array.shape:
lowercase__: Union[str, Any] = torch.from_numpy(snake_case )
else:
raise ValueError(
f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
f' {array.shape}' )
logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ):
    # Instantiate model
    logger.info(f'Loading model based on config from {config_path}...' )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' )
    load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 288 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def test_offline_with_datasets_offline_mode_enabled():
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 147 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url( repo_id , path , revision ):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'
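# Concrete expectation (illustrative): for repo_id='org-name/dataset-name',
# path='filename with blanks.csv' and revision='v2', quote() percent-encodes the
# blanks, so the asserted URL is
# https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv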
| 147 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
    '''simple docstring'''
    model_type = '''M-CLIP'''
    def __init__( self , transformerDimSize=1_024 , imageDimSize=768 , **kwargs ):
        '''simple docstring'''
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP( PreTrainedModel ):
    '''simple docstring'''
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ):
        '''simple docstring'''
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs_pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs_pooled ), embs
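# Minimal usage sketch (illustrative; the checkpoint path below is a placeholder,
# substitute any compatible M-CLIP checkpoint directory):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("path/to/m-clip-checkpoint")
# model = MultilingualCLIP.from_pretrained("path/to/m-clip-checkpoint")
# batch = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
# projected, token_embs = model(batch["input_ids"], batch["attention_mask"])
# projected has shape (batch, numDims); token_embs are the raw transformer states.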
| 357 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    '''simple docstring'''
    output_dir : str = field(
        metadata={'''help''': '''The output directory where the model will be written.'''} , )
    encoder_model_name_or_path : str = field(
        metadata={
            '''help''': (
                '''The encoder model checkpoint for weights initialization. '''
                '''Don\'t set if you want to train an encoder model from scratch.'''
            )
        } , )
    decoder_model_name_or_path : str = field(
        metadata={
            '''help''': (
                '''The decoder model checkpoint for weights initialization. '''
                '''Don\'t set if you want to train a decoder model from scratch.'''
            )
        } , )
    encoder_config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
    decoder_config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def main():
    parser = HfArgumentParser((ModelArguments,) )
    (model_args , ) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 118 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load a Wav2Vec2 frame-classification (diarization) model and copy the downstream weights into it."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load a Wav2Vec2 x-vector model and copy the downstream speaker-verification weights into it."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an S3PRL checkpoint into the matching Wav2Vec2 HuggingFace model."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
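# Hedged usage sketch (file names and paths are illustrative):
#   python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model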
| 258 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    """Builds small Nystromformer configs and inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
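# Hedged sketch of the same masked-LM check via the high-level pipeline API
# (model id taken from the tests above; the expected top token is an assumption):
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="uw-madison/nystromformer-512")
#   print(fill_mask("the [MASK] of Belgium is Brussels")[0]["token_str"])  # expected: "capital"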
| 124 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
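# Hedged usage sketch: build a small config and inspect the ONNX input axes
# (the constructor arguments below are illustrative, not taken from this file):
#   config = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#   onnx_config = DebertaV2OnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes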
| 158 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_ddim(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
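# Hedged usage sketch (model id as in the slow tests above; prompt and step count are illustrative):
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
#   image = pipe("a photo of the dolomites", num_inference_steps=50).images[0]
#   image.save("panorama.png")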
| 158 | 1 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using deterministic 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class TestIsPrime(unittest.TestCase):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
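# Hedged usage sketch: list the primes below 50 with the function above.
#   print([n for n in range(50) if is_prime(n)])
#   # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]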
| 59 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given provider (CPU by default)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
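# Hedged usage sketch (the model id and input name below are illustrative assumptions):
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))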
| 59 | 1 |
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        """Compute log probabilities for all ``n_token`` tokens."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
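# Hedged usage sketch (all dimensions below are illustrative):
#   crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=1)
#   hidden = torch.randn(2, 8, 64)            # (batch, seq_len, d_proj)
#   labels = torch.randint(0, 1000, (2, 8))
#   nll = crit(hidden, labels)                # per-token negative log-likelihoods (labels are shifted internally)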
| 366 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first ``n`` lines of each file in ``src_dir`` to ``dest_dir``."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
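# Hedged usage sketch (directory names are illustrative):
#   python minify.py ./data ./data_minified 100
# keeps only the first 100 lines of every file under ./data.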
| 224 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model
    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model
    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
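# Hedged usage sketch (model id as in the integration test above):
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   output = pipe()
#   output.images[0].save("mel_spectrogram.png")  # generated audio is in output.audios[0]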
| 336 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place with odd-even transposition (brick) sort and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
__A = [int(x) for x in input().split()]
# inputing elements of the list in one line
__A = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
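# Hedged usage sketch:
#   odd_even_sort([5, 3, 1, 4, 2])  # -> [1, 2, 3, 4, 5]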
| 164 | 0 |
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
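# Hedged usage sketch:
#   root = TrieNode()
#   root.insert_many(["car", "cart"])
#   root.find("car")   # True
#   root.find("ca")    # False: a stored word's prefix is not itself a word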
| 41 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    """Download an image of two cats that the conversion sanity check runs on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a TensorFlow MobileNetV1 checkpoint into the HuggingFace structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
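# Hypothetical usage sketch (the paths below are placeholders, not real files);
# the same conversion can also be driven programmatically instead of via the CLI:
#
#   convert_movilevit_checkpoint(
#       "mobilenet_v1_1.0_224",
#       "/path/to/mobilenet_v1_1.0_224.ckpt",
#       "/path/to/output_dir",
#       push_to_hub=False,
#   )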
| 41 | 1 |
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number whose base/exponent pair has the greatest value."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        # compare a**x through x * log10(a) instead of evaluating the huge power
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
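# Worked illustration of the logarithm trick used above: 7 * log10(3) ~= 3.340
# exceeds 11 * log10(2) ~= 3.311, which correctly ranks 3**7 = 2187 above
# 2**11 = 2048 without ever evaluating the full powers.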
| 33 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = walkable, 0 = blocked) from source to destination."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
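# Minimal usage sketch on a toy grid (1 = walkable, 0 = blocked); expected
# results shown as comments:
#
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0
#   # path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]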
| 41 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
# second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
# add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
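# Hypothetical usage sketch (assumes the public "microsoft/layoutlmv3-base"
# checkpoint and the OCR dependencies of the image processor are available):
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   # encoding now holds input_ids, bbox, attention_mask and pixel_values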
| 371 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
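# Example: perfect numbers equal the sum of their proper divisors, so both of
# these hold for the helper above.
#
#   assert sum_of_divisors(6) == 1 + 2 + 3 == 6
#   assert sum_of_divisors(28) == 28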
| 120 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
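# Sketch of the deprecation behaviour (assumes the vision dependencies needed
# by MobileViTImageProcessor are installed):
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       MobileViTFeatureExtractor()
#   # caught[0].category is FutureWarning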
| 332 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user profile information from the given beautifulsoup script tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Crawls the public profile page of an Instagram user."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of the user's profile information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Networked smoke test of the InstagramUser class."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 32 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` can be placed at (row, column) without
    repeating in the row, the column, or the enclosing 3x3 subgrid."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first vacant cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 366 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding
    whitespace/control characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of symbols (symbols being variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
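# Small illustration of the module-level BPE helpers (pure functions, no
# vocabulary files needed):
#
#   get_pairs(("h", "e", "l", "l", "o"))
#   # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
#   len(bytes_to_unicode())  # 256: every byte gets a printable unicode stand-in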
| 212 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
__lowerCAmelCase : str = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
__lowerCAmelCase : List[str] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
__lowerCAmelCase : Optional[Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
__lowerCAmelCase : int = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
__lowerCAmelCase : List[Any] = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1E-4 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[F"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
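# Toy illustration of the key-renaming helper (hypothetical one-entry state
# dict, not a real checkpoint):
#
#   sd = {"backbone.norm0.weight": 0}
#   rename_key(sd, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
#   # sd == {"backbone.hidden_states_norms.stage1.weight": 0}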
| 275 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 275 | 1 |
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein distance via top-down dynamic programming with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
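# Worked example (the classic textbook pair): the Levenshtein distance between
# "intention" and "execution" is 5.
#
#   assert min_distance_up_bottom("intention", "execution") == 5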
| 358 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer  # location of the exit layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 52 | 0 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
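# Hypothetical usage sketch: instantiate a default config and override a field.
#
#   config = EfficientFormerConfig(image_size=192)
#   assert config.hidden_sizes == [48, 96, 224, 448]
#   assert config.image_size == 192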
| 271 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_2[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : Dict = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : Any = feat_extract.pad(_a ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.feat_extract_dict
_a : List[Any] = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Tuple = [len(_a ) for x in speech_inputs]
_a : int = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : str = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_dict
_a : Tuple = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = [len(_a ) for x in speech_inputs]
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Any = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = min(_a )
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,truncation=_a ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
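# Illustrative sketch (not part of the test mixin above): how a transformers-style
# sequence feature extractor's `pad` behaves with `truncation=True`. The class name
# below is a hypothetical placeholder for the class under test.
#
#   feat_extract = MyFeatureExtractor()
#   name = feat_extract.model_input_names[0]
#   batch = BatchFeature({name: [[0.1] * 800, [0.2] * 1000, [0.3] * 1200]})
#   out = feat_extract.pad(batch, padding="max_length", max_length=800,
#                          truncation=True, return_tensors="np")
#   out[name].shape[1]  # -> 800: the longer inputs were truncated to max_length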
| 271 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
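# A minimal sketch of the same deferred-import idea using PEP 562 module-level
# __getattr__ (an illustration, not transformers' actual _LazyModule internals):
#
#   import importlib
#   _import_structure = {"tokenization_mbart": ["MBartTokenizer"]}
#   def __getattr__(name):
#       for module, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module("." + module, __name__), name)
#       raise AttributeError(name)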
| 166 |
"""simple docstring"""
import sys
import turtle
def get_mid(point_a: tuple[float, float], point_b: tuple[float, float]) -> tuple[float, float]:
    # midpoint of the segment joining the two points
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2
def triangle(
    vertex_1: tuple[float, float],
    vertex_2: tuple[float, float],
    vertex_3: tuple[float, float],
    depth: int,
) -> None:
    # draw the outline of the current triangle, then recurse into the three
    # half-size corner triangles until depth reaches 0
    my_pen.up()
    my_pen.goto(vertex_1[0], vertex_1[1])
    my_pen.down()
    my_pen.goto(vertex_2[0], vertex_2[1])
    my_pen.goto(vertex_3[0], vertex_3[1])
    my_pen.goto(vertex_1[0], vertex_1[1])
    if depth == 0:
        return
    triangle(vertex_1, get_mid(vertex_1, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
    triangle(vertex_2, get_mid(vertex_1, vertex_2), get_mid(vertex_2, vertex_3), depth - 1)
    triangle(vertex_3, get_mid(vertex_3, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
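# Quick check of get_mid (pure arithmetic, no turtle needed):
#   get_mid((0.0, 0.0), (175.0, -125.0))  # -> (87.5, -62.5)
# Each call draws one outline and recurses into three half-size corner triangles,
# so a run at depth d ends with 3**d smallest triangles.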
| 166 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_features"]
def __init__( self : Optional[int] ,lowerCamelCase__ : Any=80 ,lowerCamelCase__ : Optional[Any]=16_000 ,lowerCamelCase__ : List[Any]=160 ,lowerCamelCase__ : int=30 ,lowerCamelCase__ : int=400 ,lowerCamelCase__ : Any=0.0 ,lowerCamelCase__ : Union[str, Any]=False ,**lowerCamelCase__ : Optional[Any] ,):
super().__init__(
feature_size=lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,padding_value=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = n_fft
UpperCAmelCase__ = hop_length
UpperCAmelCase__ = chunk_length
UpperCAmelCase__ = chunk_length * sampling_rate
UpperCAmelCase__ = self.n_samples // hop_length
UpperCAmelCase__ = sampling_rate
UpperCAmelCase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=lowerCamelCase__ ,min_frequency=0.0 ,max_frequency=8_0_0_0.0 ,sampling_rate=lowerCamelCase__ ,norm='slaney' ,mel_scale='slaney' ,)
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : np.array ):
UpperCAmelCase__ = spectrogram(
lowerCamelCase__ ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel='log10' ,)
UpperCAmelCase__ = log_spec[:, :-1]
UpperCAmelCase__ = np.maximum(lowerCamelCase__ ,log_spec.max() - 8.0 )
UpperCAmelCase__ = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowerCAmelCase ( lowerCamelCase__ : List[np.ndarray] ,lowerCamelCase__ : List[np.ndarray] ,lowerCamelCase__ : float = 0.0 ):
if attention_mask is not None:
UpperCAmelCase__ = np.array(lowerCamelCase__ ,np.intaa )
UpperCAmelCase__ = []
for vector, length in zip(lowerCamelCase__ ,attention_mask.sum(-1 ) ):
UpperCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCAmelCase__ = padding_value
normed_input_values.append(lowerCamelCase__ )
else:
UpperCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : int ,lowerCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[str] = "max_length" ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,**lowerCamelCase__ : Dict ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
UpperCAmelCase__ = isinstance(lowerCamelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase__ = is_batched_numpy or (
isinstance(lowerCamelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase__ = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ ,np.ndarray ):
UpperCAmelCase__ = np.asarray(lowerCamelCase__ ,dtype=np.floataa )
elif isinstance(lowerCamelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase__ = [np.asarray([raw_speech] ).T]
UpperCAmelCase__ = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
UpperCAmelCase__ = self.pad(
lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=max_length if max_length else self.n_samples ,truncation=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=return_attention_mask or do_normalize ,)
# zero-mean and unit-variance normalization
if do_normalize:
UpperCAmelCase__ = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] ,attention_mask=padded_inputs['attention_mask'] ,padding_value=self.padding_value ,)
UpperCAmelCase__ = np.stack(padded_inputs['input_features'] ,axis=0 )
# make sure list is in array format
UpperCAmelCase__ = padded_inputs.get('input_features' ).transpose(2 ,0 ,1 )
UpperCAmelCase__ = [self._np_extract_fbank_features(lowerCamelCase__ ) for waveform in input_features[0]]
if isinstance(input_features[0] ,lowerCamelCase__ ):
UpperCAmelCase__ = [np.asarray(lowerCamelCase__ ,dtype=np.floataa ) for feature in input_features]
else:
UpperCAmelCase__ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCAmelCase__ = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
UpperCAmelCase__ = padded_inputs.convert_to_tensors(lowerCamelCase__ )
return padded_inputs
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
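# Usage sketch for the Whisper-style extractor above (class names in this dump are
# obfuscated; `WhisperStyleFeatureExtractor` is a hypothetical alias for the class):
#
#   import numpy as np
#   fe = WhisperStyleFeatureExtractor()
#   audio = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
#   feats = fe(audio, sampling_rate=16_000, return_tensors="np")
#   # default chunk_length=30 and hop_length=160 yield 3000 frames of 80 mel bins:
#   feats["input_features"].shape  # -> (1, 80, 3000)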
| 98 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''deit'''
def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
        return 1e-4
| 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    _lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 360 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
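# How the "*" wildcard in MAPPING resolves (illustrative walk-through of the loading
# loop below, with a made-up tensor name):
#
#   name = "encoder.layers.3.self_attn.linear_q.weight"
#   key = "self_attn.linear_q"                       # matched MAPPING entry
#   layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
#   "encoder.layers.*.self_attn.linear_q".replace("*", layer_index)
#   # -> "encoder.layers.3.self_attn.linear_q"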
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 340 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    # n-th ugly number: positive integers whose only prime factors are 2, 3 and 5
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'mobilenet_v2'
def __init__( self : Any , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=224 , lowercase_ : Optional[int]=1.0 , lowercase_ : Any=8 , lowercase_ : Optional[int]=8 , lowercase_ : Tuple=6 , lowercase_ : str=32 , lowercase_ : int=True , lowercase_ : Any=True , lowercase_ : Tuple="relu6" , lowercase_ : List[str]=True , lowercase_ : Tuple=0.8 , lowercase_ : Any=0.0_2 , lowercase_ : Dict=0.0_0_1 , lowercase_ : List[str]=255 , **lowercase_ : Any , ):
super().__init__(**lowercase_ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
UpperCamelCase__ : int =num_channels
UpperCamelCase__ : str =image_size
UpperCamelCase__ : Optional[Any] =depth_multiplier
UpperCamelCase__ : Optional[int] =depth_divisible_by
UpperCamelCase__ : List[str] =min_depth
UpperCamelCase__ : Union[str, Any] =expand_ratio
UpperCamelCase__ : Union[str, Any] =output_stride
UpperCamelCase__ : int =first_layer_is_expansion
UpperCamelCase__ : Any =finegrained_output
UpperCamelCase__ : Dict =hidden_act
UpperCamelCase__ : Optional[Any] =tf_padding
UpperCamelCase__ : Union[str, Any] =classifier_dropout_prob
UpperCamelCase__ : int =initializer_range
UpperCamelCase__ : Tuple =layer_norm_eps
UpperCamelCase__ : str =semantic_loss_ignore_index
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = version.parse('1.11' )
@property
def _lowerCAmelCase ( self : Any ):
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def _lowerCAmelCase ( self : List[Any] ):
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def _lowerCAmelCase ( self : int ):
return 1e-4
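# Usage sketch (the classes above are obfuscated; assuming the usual transformers
# names MobileNetV2Config / MobileNetV2OnnxConfig):
#
#   config = MobileNetV2Config(image_size=224, depth_multiplier=1.0)
#   config.output_stride  # -> 32 (default above)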
| 157 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_SCREAMING_SNAKE_CASE : Dict = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_SCREAMING_SNAKE_CASE : Any = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    '''simple docstring'''
    path = os.path.join(output_dir, f'{split}_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
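# e.g. get_results("/tmp/out", split="test") returns the parsed contents of
# /tmp/out/test_results.json (hypothetical path), or raises if the file is missing.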
_SCREAMING_SNAKE_CASE : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __a ( snake_case__ ):
"""simple docstring"""
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Union[str, Any] =self.get_auto_remove_tmp_dir()
UpperCamelCase__ : int =f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
run_flax_glue.main()
UpperCamelCase__ : Any =get_results(lowercase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
@slow
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : List[str] =self.get_auto_remove_tmp_dir()
UpperCamelCase__ : str =f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
run_clm_flax.main()
UpperCamelCase__ : Any =get_results(lowercase_ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : List[Any] =self.get_auto_remove_tmp_dir()
UpperCamelCase__ : List[Any] =f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
run_summarization_flax.main()
UpperCamelCase__ : Union[str, Any] =get_results(lowercase_ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def _lowerCAmelCase ( self : Dict ):
UpperCamelCase__ : int =self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Optional[int] =f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
run_mlm_flax.main()
UpperCamelCase__ : List[Any] =get_results(lowercase_ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : Optional[Any] =self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Optional[int] =f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
run_ta_mlm_flax.main()
UpperCamelCase__ : Optional[int] =get_results(lowercase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )
@slow
def _lowerCAmelCase ( self : List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCamelCase__ : Union[str, Any] =7 if get_gpu_count() > 1 else 2
UpperCamelCase__ : int =self.get_auto_remove_tmp_dir()
UpperCamelCase__ : int =f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
run_flax_ner.main()
UpperCamelCase__ : Any =get_results(lowercase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : Union[str, Any] =self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Any =f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
run_qa.main()
UpperCamelCase__ : Tuple =get_results(lowercase_ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 157 | 1 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    # XNOR outputs 1 exactly when both inputs are equal
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 55 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableUnCLIPPipeline
_lowerCamelCase = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = 32
lowerCamelCase_ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase_ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase_ = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=UpperCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase )
lowerCamelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , )
torch.manual_seed(0 )
lowerCamelCase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL()
lowerCamelCase_ = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def snake_case ( self , UpperCamelCase , UpperCamelCase=0 ):
"""simple docstring"""
if str(UpperCamelCase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(UpperCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
lowerCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ = pipe("anime turle" , generator=UpperCamelCase , output_type="np" )
lowerCamelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
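# Note on the memory-saving calls used above: enable_attention_slicing() trades
# speed for lower peak memory, and enable_sequential_cpu_offload() keeps submodules
# on CPU and moves each to GPU only while it runs, which is why the test can assert
# a peak allocation below 7 GB.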
| 55 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __snake_case ( nn.Module ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.0 , snake_case__ = None , snake_case__ = "geglu" , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = "layer_norm" , snake_case__ = False , ) -> str:
'''simple docstring'''
super().__init__()
UpperCAmelCase : List[Any] =only_cross_attention
UpperCAmelCase : Optional[Any] =(num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
UpperCAmelCase : Dict =(num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
UpperCAmelCase : List[Any] =AdaLayerNorm(a_ , a_ )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase : List[str] =AdaLayerNormZero(a_ , a_ )
else:
UpperCAmelCase : List[str] =nn.LayerNorm(a_ , elementwise_affine=a_ )
UpperCAmelCase : List[Any] =Attention(
query_dim=a_ , heads=a_ , dim_head=a_ , dropout=a_ , bias=a_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
UpperCAmelCase : Union[str, Any] =(
AdaLayerNorm(a_ , a_ )
if self.use_ada_layer_norm
else nn.LayerNorm(a_ , elementwise_affine=a_ )
)
UpperCAmelCase : Union[str, Any] =Attention(
query_dim=a_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a_ , dim_head=a_ , dropout=a_ , bias=a_ , upcast_attention=a_ , ) # is self-attn if encoder_hidden_states is none
else:
UpperCAmelCase : Dict =None
UpperCAmelCase : Optional[int] =None
# 3. Feed-forward
UpperCAmelCase : Optional[Any] =nn.LayerNorm(a_ , elementwise_affine=a_ )
UpperCAmelCase : Union[str, Any] =FeedForward(a_ , dropout=a_ , activation_fn=a_ , final_dropout=a_ )
# let chunk size default to None
UpperCAmelCase : Union[str, Any] =None
UpperCAmelCase : Any =0
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> str:
'''simple docstring'''
UpperCAmelCase : int =chunk_size
UpperCAmelCase : Any =dim
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , ) -> Union[str, Any]:
'''simple docstring'''
if self.use_ada_layer_norm:
UpperCAmelCase : str =self.norma(a_ , a_ )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase : Any =self.norma(
a_ , a_ , a_ , hidden_dtype=hidden_states.dtype )
else:
UpperCAmelCase : Dict =self.norma(a_ )
UpperCAmelCase : Optional[Any] =cross_attention_kwargs if cross_attention_kwargs is not None else {}
UpperCAmelCase : Union[str, Any] =self.attna(
a_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a_ , **a_ , )
if self.use_ada_layer_norm_zero:
UpperCAmelCase : Tuple =gate_msa.unsqueeze(1 ) * attn_output
UpperCAmelCase : Union[str, Any] =attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
UpperCAmelCase : List[str] =(
self.norma(a_ , a_ ) if self.use_ada_layer_norm else self.norma(a_ )
)
UpperCAmelCase : Tuple =self.attna(
a_ , encoder_hidden_states=a_ , attention_mask=a_ , **a_ , )
UpperCAmelCase : str =attn_output + hidden_states
# 3. Feed-forward
UpperCAmelCase : Union[str, Any] =self.norma(a_ )
if self.use_ada_layer_norm_zero:
UpperCAmelCase : List[str] =norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
UpperCAmelCase : str =norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
UpperCAmelCase : Dict =torch.cat(
[self.ff(a_ ) for hid_slice in norm_hidden_states.chunk(a_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
UpperCAmelCase : str =self.ff(a_ )
if self.use_ada_layer_norm_zero:
UpperCAmelCase : Dict =gate_mlp.unsqueeze(1 ) * ff_output
UpperCAmelCase : List[str] =ff_output + hidden_states
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = 4 , snake_case__ = 0.0 , snake_case__ = "geglu" , snake_case__ = False , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Dict =int(dim * mult )
UpperCAmelCase : Union[str, Any] =dim_out if dim_out is not None else dim
if activation_fn == "gelu":
UpperCAmelCase : Optional[int] =GELU(a_ , a_ )
if activation_fn == "gelu-approximate":
UpperCAmelCase : int =GELU(a_ , a_ , approximate='''tanh''' )
elif activation_fn == "geglu":
UpperCAmelCase : List[Any] =GEGLU(a_ , a_ )
elif activation_fn == "geglu-approximate":
UpperCAmelCase : Dict =ApproximateGELU(a_ , a_ )
UpperCAmelCase : Any =nn.ModuleList([] )
# project in
self.net.append(a_ )
# project dropout
self.net.append(nn.Dropout(a_ ) )
# project out
self.net.append(nn.Linear(a_ , a_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(a_ ) )
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
for module in self.net:
UpperCAmelCase : str =module(a_ )
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ = "none" ) -> int:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Union[str, Any] =nn.Linear(a_ , a_ )
UpperCAmelCase : Tuple =approximate
def UpperCAmelCase__ ( self , snake_case__ ) -> str:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(a_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCAmelCase__ ( self , snake_case__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int =self.proj(a_ )
UpperCAmelCase : str =self.gelu(a_ )
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , snake_case__ , snake_case__ ) -> Optional[int]:
'''simple docstring'''
super().__init__()
UpperCAmelCase : List[str] =nn.Linear(a_ , dim_out * 2 )
def UpperCAmelCase__ ( self , snake_case__ ) -> Any:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(a_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.proj(a_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(a_ )
class __snake_case ( nn.Module ):
def __init__( self , snake_case__ , snake_case__ ) -> int:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Union[str, Any] =nn.Linear(a_ , a_ )
def UpperCAmelCase__ ( self , snake_case__ ) -> str:
'''simple docstring'''
UpperCAmelCase : List[Any] =self.proj(a_ )
return x * torch.sigmoid(1.702 * x )
class __snake_case ( nn.Module ):
def __init__( self , snake_case__ , snake_case__ ) -> Dict:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Optional[Any] =nn.Embedding(a_ , a_ )
UpperCAmelCase : Optional[int] =nn.SiLU()
UpperCAmelCase : Dict =nn.Linear(a_ , embedding_dim * 2 )
UpperCAmelCase : int =nn.LayerNorm(a_ , elementwise_affine=a_ )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] =self.linear(self.silu(self.emb(a_ ) ) )
UpperCAmelCase : Union[str, Any] =torch.chunk(a_ , 2 )
UpperCAmelCase : Dict =self.norm(a_ ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
    """AdaLN-Zero (as in DiT): one projection yields six modulation tensors."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """GroupNorm whose scale and shift are predicted from a conditioning embedding."""

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # broadcast over the spatial dimensions of an NCHW feature map
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
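# A quick shape sanity check for the gating blocks above (a sketch, not part of
# the original module; assumes the fixed signature GEGLU(dim_in, dim_out) and
# torch imported at the top of this file):
#
#   geglu = GEGLU(64, 128)
#   out = geglu(torch.randn(2, 10, 64))   # -> torch.Size([2, 10, 128])
#
# GEGLU projects to 2 * dim_out, splits the result along the last dimension,
# and multiplies one half by gelu() of the other half.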
| 352 |
import sys
def matrix_chain_order(array):
    """Return the DP cost table and split table for matrix-chain multiplication."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization recorded in the split table."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
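# Expected output for the sample dimensions (the classic CLRS 15.2 instance):
# the minimum cost is 15125 scalar multiplications and the parenthesization
# printed above is ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).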
| 78 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
POKER_HANDS = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", POKER_HANDS)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
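# These tests assume the usual Project Euler problem 54 layout: a sol1.py
# defining PokerHand and a poker_hands.txt data file next to this module.
# They can then be run with, e.g.:  python -m pytest <this test file>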
| 33 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second; anagrams cancel out to 0.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
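# Examples (mirroring the original doctests):
#   check_anagrams('Silent', 'Listen')                     -> True
#   check_anagrams('This is a string', 'Is this a string') -> True
#   check_anagrams('There', 'Their')                       -> False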
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 100 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ]
        )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if 'past_key_values' in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        # tolerance used when validating exported ONNX outputs against PyTorch
        return 1e-3
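# Usage sketch (assumes transformers is installed; not part of the original
# file): WhisperConfig() with the defaults above describes a whisper-base
# style model, e.g. config.d_model == 256 and config.max_source_positions == 1500.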
| 362 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
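# e.g. if the last k columns of input_ids are pad_token_id in every row of the
# batch, trim_batch returns a (batch, seq_len - k) tensor; attention_mask,
# when given, is trimmed with the same column mask.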
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
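# Worked example: f1_score("new york city", "york city area")
#   common tokens = {york, city} -> num_same = 2
#   precision = 2/3, recall = 2/3, F1 = 2 * (2/3) * (2/3) / (2/3 + 2/3) = 2/3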
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 94 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    # replace this module in sys.modules with a lazy proxy; the heavy framework
    # imports above only happen when one of the model classes is first accessed
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 225 |
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # discard stack entries that cannot be the next greater element
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
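# Example, hand-checked against the stack walk above:
#   next_greatest_element([2, 7, 3, 5, 1]) -> [7, -1, 5, -1, -1]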
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
    )
| 225 | 1 |
def counting_sort(collection):
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 365 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
            '''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(entropy_eval_args)
| 315 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # mixin flag names follow the standard TF model test suite
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.2275601, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 192 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # flag names assume the standard TokenizerTesterMixin options
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
# fmt: off
A__ : int = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 192 | 1 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
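# Note: the two-pointer scan assumes `nums` is sorted in ascending order;
# e.g. two_pointer([2, 7, 11, 15], 9) -> [0, 1], as the demo below prints.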
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 1_1, 1_5], 9) = }')
| 362 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_lowercase : Dict = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_lowercase : List[Any] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_lowercase : Dict = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
'''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
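# A minimal usage sketch (assumes the `datasets` and `jiwer` packages are installed;
# the expected value matches the docstring example above):
#
#     import datasets
#     cer = datasets.load_metric("cer")
#     score = cer.compute(
#         predictions=["this is the prediction", "there is an other sample"],
#         references=["this is the reference", "there is another one"],
#     )
#     print(score)  # ~0.3415 — character edits relative to reference length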
| 272 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
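# A short usage sketch (the checkpoint name comes from the archive map above;
# loading from the Hub is optional):
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     # or: config = MobileNetV1Config.from_pretrained("google/mobilenet_v1_1.0_224")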
| 349 |
'''simple docstring'''
def is_subset_sum(arr: list, required_sum: int) -> bool:
    """
    Check whether any subset of ``arr`` sums to ``required_sum``
    using bottom-up dynamic programming.

    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 9)
    True
    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 30)
    False
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
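    # Illustrative demo (example values chosen arbitrarily for this sketch):
    print(f"{is_subset_sum([3, 34, 4, 12, 5, 2], 9) = }")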
| 349 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
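# A minimal usage sketch (the vocab path is a placeholder; any SentencePiece
# model file with the expected special tokens would work the same way):
#
#     tokenizer = BertGenerationTokenizer("spiece.model")
#     ids = tokenizer("Hello world").input_ids
#     text = tokenizer.decode(ids)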
| 355 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 271 | 0 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class to store the configuration of an Encodec audio codec model."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000, audio_channels=1, normalize=False,
        chunk_length_s=None, overlap=None,
        hidden_size=128, num_filters=32, num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm",
        kernel_size=7, last_kernel_size=7, residual_kernel_size=3,
        dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect",
        compress=2, num_lstm_layers=2, trim_right_ratio=1.0,
        codebook_size=1024, codebook_dim=None, use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)
    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
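# Quick sketch of the derived properties (values follow from the defaults above):
#
#     config = EncodecConfig()
#     config.frame_rate      # ceil(24000 / prod([8, 5, 4, 2])) == 75
#     config.num_quantizers  # int(1000 * 24.0 // (75 * 10)) == 32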
| 40 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
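# Illustrative usage of FileLock outside the tests (the lock path is a placeholder):
#
#     from datasets.utils.filelock import FileLock
#     with FileLock("/tmp/my_resource.lock"):
#         ...  # only one process at a time executes this block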
| 335 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")

    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
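# Example invocation (paths are placeholders for this sketch):
#
#     python convert_mobilevit_original_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small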
| 38 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check whether ``number`` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the numerator and denominator of the sum of the three fractions, in lowest form."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 38 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96,
        depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7,
        mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        use_absolute_embeddings=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32,
        out_features=None, out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
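# A short usage sketch (derived attributes follow from the defaults above):
#
#     config = SwinConfig(image_size=384, window_size=12)
#     config.num_layers   # == len(config.depths) == 4
#     config.hidden_size  # == embed_dim * 2 ** (len(depths) - 1) == 768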
| 7 | """simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the hole sizes that can be tiled by between one and ``n_limit``
    distinct hollow square laminae using up to ``t_limit`` tiles.
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
| 355 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
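# A minimal usage sketch (the checkpoint name and data directory are placeholders):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode="train")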
| 41 | 0 |
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks whether the given values can be used with the ``enigma`` function."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Creates the symbol-swap dictionary for the plugboard."""
    # Checks the input string: it must be a string with an even number of symbols
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypts or decrypts ``text`` with the emulated Enigma machine (the operation is symmetric)."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 54 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : str = """▁"""
SCREAMING_SNAKE_CASE : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case : Tuple = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case,  # the expected-encoding literal defined above
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 102 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=0.9,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        rescale_factor=1 / 255,
        do_rescale=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            # Scale the image up before cropping so that `crop_pct` of it survives the crop.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
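# Illustrative usage of the processor above (a sketch, not part of the original module;
# the 224px output follows the class defaults, the blank input image is an assumption):
#
#     processor = PoolFormerImageProcessor()
#     image = PIL.Image.new("RGB", (640, 480))
#     batch = processor(images=image, return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224)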
| 350 |
def pancake_sort(arr):
    """Sort a list with the pancake sort algorithm (prefix reversals only)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, moving the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, moving the maximum into its final place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
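# Quick self-check for pancake_sort (an illustrative addition, not in the original script):
def _check_pancake_sort() -> None:
    assert pancake_sort([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]
    assert pancake_sort([]) == []
    assert pancake_sort([7]) == [7]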
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 22 | 0 |
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
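# Why the tuple trick works (illustrative note, not in the original file):
# (1, 1).count(0) == 0 -> int(True) == 1, while any 0 input gives count(0) >= 1 -> 0.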
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 2 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of random PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 291 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
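# Illustrative usage of the configs above (a sketch, not part of the original module):
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     config.num_hidden_layers  # -> 12, the google/bigbird-roberta-base default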
| 28 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_input_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Dict = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Any = 1
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Tuple = pt_model_class(UpperCAmelCase ).eval()
lowerCAmelCase_ : List[str] = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowerCAmelCase_ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase )
lowerCAmelCase_ : List[str] = fx_state
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : int = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model_class.from_pretrained(UpperCAmelCase , from_pt=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = fx_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : str = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Any = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = pt_model_class(UpperCAmelCase ).eval()
lowerCAmelCase_ : Any = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowerCAmelCase_ : Union[str, Any] = load_flax_weights_in_pytorch_model(UpperCAmelCase , fx_model.params )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : Tuple = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = pt_model_class.from_pretrained(UpperCAmelCase , from_flax=UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : Dict = pt_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 28 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 10_08)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_08)
UpperCamelCase = XGLMTokenizer(__a , keep_accents=__a )
UpperCamelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
UpperCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_12_27, 44_47, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
| 153 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
| 153 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution over logits ``x``."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
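# Illustrative check of the helper above (a sketch, not part of the original file):
# uniform logits give a uniform softmax, so the entropy equals log(n_classes), e.g.
#
#     entropy(torch.zeros(1, 4))  # -> tensor([1.3863]) == log(4)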
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        # A scalar threshold applies to every layer; a list sets per-layer thresholds.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
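# Control-flow note (illustrative, not in the original file): during inference, once a
# layer's highway entropy drops below its threshold, forward() raises HighwayException
# instead of returning; DeeBertForSequenceClassification catches it further below and
# uses the early-exit logits, skipping the remaining BertLayers entirely.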
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message  # the early-exit outputs tuple
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A highway (early-exit) head: pool one intermediate layer's output and classify it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 245 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.',
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 245 | 1 |
import unittest
from transformers import DonutProcessor
a ="""naver-clova-ix/donut-base"""
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : str):
__lowerCamelCase : Tuple = DonutProcessor.from_pretrained(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : str = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
__lowerCamelCase : List[Any] = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
__lowerCamelCase : Tuple = self.processor.tokenajson(SCREAMING_SNAKE_CASE__)
self.assertDictEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
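# Usage sketch (the tag sequence here is illustrative, not from the checkpoint's
# training data): `token2json` parses Donut's XML-style tags back into nested JSON,
# e.g. processor.token2json("<s_name>Jane</s_name>") -> {"name": "Jane"}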
| 73 | '''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place with odd-even transposition (brick) sort and return it."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(10, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
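# Note: odd-even transposition is a parallel-friendly bubble sort variant; even- and
# odd-indexed neighbor pairs are compared on alternating passes, so each pass can run
# in parallel. Sequentially it is still O(n^2) comparisons in the worst case.
# Example: odd_even_transposition([3, 1, 2]) -> [1, 2, 3]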
| 31 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
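# Minimal usage sketch (small values chosen for illustration):
#   config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
#   config.hidden_size  # -> 128, resolved through the `attribute_map` alias for `n_embd`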
| 354 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
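# Usage sketch (checkpoint name is illustrative; any repo that ships a CLIP image
# processor together with an XLM-R tokenizer should work):
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")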
| 316 | 0 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: exactly one of the three arguments must be 0 (the unknown);
    the other two are used to solve for it.

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    """
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
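# Example: with V = 12 and I = 4 (resistance passed as 0), the function solves
# R = V / I, so ohms_law(voltage=12, current=4, resistance=0) -> {"resistance": 3.0}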
| 215 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
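# Note: the timm checkpoint stores Q, K and V as one fused (3 * hidden_size, hidden_size)
# matrix; the slices above peel them apart. E.g. with hidden_size = 1024, rows 0:1024
# hold the query, rows 1024:2048 the key, and the last 1024 rows the value projection.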
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
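# Example invocation (output path illustrative; assumes this script is saved as
# convert_dpt_to_pytorch.py):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large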
| 215 | 1 |
"""simple docstring"""
import numpy as np
def _lowercase ( __snake_case ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def _lowercase ( __snake_case ) -> np.ndarray:
return vector * sigmoid(__snake_case )
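# Sanity-check sketch: sigmoid(0) = 0.5, so sigmoid_linear_unit(0) = 0, while for
# large positive x the unit approaches the identity:
#   sigmoid_linear_unit(np.array([0.0, 10.0]))  # -> approx. [0.0, 9.9995]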
if __name__ == "__main__":
import doctest
doctest.testmod() | 58 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
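# Note on the `div(keep_prob)` rescaling above: each sample's residual branch survives
# with probability keep_prob = 1 - drop_prob, so dividing the kept activations by
# keep_prob preserves the expected magnitude between training and evaluation.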
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Constructs patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with 1 group; input has shape [batch_size, channels, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
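# Design note: subtracting the input makes this module return only the "pooling
# residual"; the identity part is re-added by the skip connection in the block,
# which keeps this cheap token mixer a drop-in replacement for attention.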
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) | 58 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
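# Sketch: `attribute_map` lets generic code read standard config names, e.g.
#   CTRLConfig().hidden_size  # -> 1280, aliased onto `n_embd`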
| 298 |
'''simple docstring'''
def capitalize_variants(txt: str) -> list:
    """Return every variant of `txt` with exactly one alphabetic character upper-cased.

    >>> capitalize_variants("ab")
    ['Ab', 'aB']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 298 | 1 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
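# Usage sketch: image_grid(images, rows=2, cols=2) tiles four equally sized PIL
# images into a single 2x2 contact sheet; len(images) must equal rows * cols.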
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase="robotic cat with wings" , __UpperCamelCase=7.5 , __UpperCamelCase=50 , __UpperCamelCase=1 , __UpperCamelCase=42 , ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = torch.Generator(pipeline.device ).manual_seed(__UpperCamelCase )
lowerCAmelCase_ : List[Any] = pipeline(
__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=__UpperCamelCase , generator=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , ).images
lowerCAmelCase_ : Tuple = int(math.sqrt(__UpperCamelCase ) )
lowerCAmelCase_ : List[Any] = image_grid(__UpperCamelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))

pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))

dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 161 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 161 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
lowercase__ = model(**_UpperCAmelCase ).loss
loss.backward()
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase__ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
lowercase__ = model(**_UpperCAmelCase ).loss
loss.backward()
def lowerCamelCase__ (self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase__ = problem_type["""title"""]
lowercase__ = problem_type["""num_labels"""]
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
lowercase__ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase__ = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
lowercase__ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = LevitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )
# verify the logits
lowercase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowercase__ = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
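# --- Added example (not part of the original test file) ---
# A minimal sketch of the inference flow the LeViT tests above exercise, assuming
# "facebook/levit-128S" is the public checkpoint behind LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0].
import torch
from PIL import Image
from transformers import LevitForImageClassification, LevitImageProcessor

levit_processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
levit_model = LevitForImageClassification.from_pretrained("facebook/levit-128S")
cats_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
levit_inputs = levit_processor(images=cats_image, return_tensors="pt")
with torch.no_grad():
    levit_logits = levit_model(**levit_inputs).logits  # shape (1, 1000), ImageNet classes
print(levit_logits.argmax(-1).item())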
| 305 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , **_UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_UpperCAmelCase , return_tensors="""np""" )
lowercase__ = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = processor(text=_UpperCAmelCase )
lowercase__ = tokenizer(_UpperCAmelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
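# --- Added example (not part of the original test file) ---
# A hedged sketch of the joint text+image call the AlignProcessor tests above cover,
# assuming "kakaobrain/align-base" is the public ALIGN checkpoint.
from PIL import Image
from transformers import AlignModel, AlignProcessor

align_processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
align_model = AlignModel.from_pretrained("kakaobrain/align-base")
align_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
align_inputs = align_processor(text=["a photo of two cats"], images=align_image, return_tensors="pt")
align_outputs = align_model(**align_inputs)
print(align_outputs.logits_per_image)  # image-text similarity scores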
| 305 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = AutoencoderKL
A__ = '''sample'''
A__ = 1E-2
@property
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : List[str] = 4
__snake_case : int = 3
__snake_case : Dict = (32, 32)
__snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
return {"sample": image}
@property
def A_ ( self : Dict ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
def A_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
__snake_case : Tuple = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
pass
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case : Any = self.prepare_init_args_and_inputs_for_common()
__snake_case : Union[str, Any] = self.model_class(**__a )
model.to(__a )
assert not model.is_gradient_checkpointing and model.training
__snake_case : Union[str, Any] = model(**__a ).sample
# run the backward pass on the model. For simplicity, we don't compute a real loss
# here and instead backprop on the mean difference to random targets
model.zero_grad()
__snake_case : List[str] = torch.randn_like(__a )
__snake_case : List[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case : Optional[Any] = self.model_class(**__a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case : Tuple = model_a(**__a ).sample
# run the backward pass on the model. For simplicity, we don't compute a real loss
# here and instead backprop on the mean difference to random targets
model_a.zero_grad()
__snake_case : Optional[int] = (out_a - labels).mean()
loss_a.backward()
# compare the outputs and the parameter gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case : int = dict(model.named_parameters() )
__snake_case : int = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case , __snake_case : int = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=__a )
self.assertIsNotNone(__a )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__a )
__snake_case : Optional[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Any = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case : Tuple = model.to(__a )
model.eval()
if torch_device == "mps":
__snake_case : str = torch.manual_seed(0 )
else:
__snake_case : List[Any] = torch.Generator(device=__a ).manual_seed(0 )
__snake_case : str = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case : Union[str, Any] = image.to(__a )
with torch.no_grad():
__snake_case : Any = model(__a , sample_posterior=__a , generator=__a ).sample
__snake_case : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case : Dict = torch.tensor(
[
-4.00_78e-01,
-3.83_23e-04,
-1.26_81e-01,
-1.14_62e-01,
2.00_95e-01,
1.08_93e-01,
-8.82_47e-02,
-3.03_61e-01,
-9.86_44e-03,
] )
elif torch_device == "cpu":
__snake_case : Optional[int] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case : Optional[int] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(__a , __a , rtol=1e-2 ) )
@slow
class snake_case__ ( unittest.TestCase ):
def A_ ( self : Dict , __a : Tuple , __a : Optional[Any] ) -> Any:
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy'''
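# Intended filename pattern (per the original diffusers test): seed=33 with
# shape=(4, 3, 512, 512) maps to "gaussian_noise_s=33_shape=4_3_512_512.npy".
# (Illustrative values; the joined parts are meant to come from str(s) over the shape tuple.)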
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Optional[int] , __a : List[str]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ) -> List[Any]:
'''simple docstring'''
__snake_case : str = torch.floataa if fpaa else torch.floataa
__snake_case : Optional[int] = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a )
return image
def A_ ( self : Union[str, Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : str=False ) -> Tuple:
'''simple docstring'''
__snake_case : Any = 'fp16' if fpaa else None
__snake_case : Dict = torch.floataa if fpaa else torch.floataa
__snake_case : int = AutoencoderKL.from_pretrained(
__a , subfolder='vae' , torch_dtype=__a , revision=__a , )
model.to(__a ).eval()
return model
def A_ ( self : Dict , __a : Optional[int]=0 ) -> Dict:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(__a )
return torch.Generator(device=__a ).manual_seed(__a )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A_ ( self : str , __a : Any , __a : int , __a : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Tuple = self.get_sd_vae_model()
__snake_case : int = self.get_sd_image(__a )
__snake_case : Optional[int] = self.get_generator(__a )
with torch.no_grad():
__snake_case : List[str] = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
__snake_case : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : Dict = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(__a , __a , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Union[str, Any] , __a : Tuple , __a : Union[str, Any] ) -> Any:
'''simple docstring'''
__snake_case : List[Any] = self.get_sd_vae_model(fpaa=__a )
__snake_case : List[str] = self.get_sd_image(__a , fpaa=__a )
__snake_case : List[str] = self.get_generator(__a )
with torch.no_grad():
__snake_case : Any = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
__snake_case : Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : Optional[Any] = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A_ ( self : Optional[int] , __a : Any , __a : Optional[int] , __a : List[Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = self.get_sd_vae_model()
__snake_case : Union[str, Any] = self.get_sd_image(__a )
with torch.no_grad():
__snake_case : int = model(__a ).sample
assert sample.shape == image.shape
__snake_case : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : Any = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(__a , __a , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_sd_vae_model()
__snake_case : Any = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : Optional[Any] = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case : Tuple = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case : Optional[Any] = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Dict , __a : Optional[int] , __a : int ) -> Tuple:
'''simple docstring'''
__snake_case : int = self.get_sd_vae_model(fpaa=__a )
__snake_case : Tuple = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
__snake_case : List[Any] = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case : int = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : List[Any] = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def A_ ( self : int , __a : Optional[int] ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_sd_vae_model(fpaa=__a )
__snake_case : Optional[int] = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
__snake_case : List[Any] = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : Optional[Any] = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def A_ ( self : int , __a : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Any = self.get_sd_vae_model()
__snake_case : Tuple = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : Dict = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : List[Any] = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A_ ( self : Dict , __a : List[str] , __a : List[str] ) -> int:
'''simple docstring'''
__snake_case : List[Any] = self.get_sd_vae_model()
__snake_case : Any = self.get_sd_image(__a )
__snake_case : Optional[int] = self.get_generator(__a )
with torch.no_grad():
__snake_case : Union[str, Any] = model.encode(__a ).latent_dist
__snake_case : Any = dist.sample(generator=__a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case : Optional[int] = torch.tensor(__a )
__snake_case : Optional[int] = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(__a , __a , atol=__a )
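# --- Added example (not part of the original test file) ---
# A minimal sketch of the encode/decode round trip the VAE tests above exercise,
# using the SD v1.4 "vae" subfolder checkpoint named in get_sd_vae_model.
import torch
from diffusers import AutoencoderKL

sd_vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
sd_vae.eval()
rgb = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    posterior = sd_vae.encode(rgb).latent_dist
    latents = posterior.sample()            # (1, 4, 64, 64): 8x spatial downsampling
    recon = sd_vae.decode(latents).sample   # back to (1, 3, 512, 512)
print(latents.shape, recon.shape)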
| 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
A__ : Dict = logging.getLogger()
def a_ ( ) -> Tuple:
__snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('-f' )
__snake_case : Any = parser.parse_args()
return args.f
def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
__snake_case : Tuple = {}
__snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase ,'r' ) as f:
__snake_case : List[str] = json.load(_UpperCAmelCase )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
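# all_results.json is the summary each *_no_trainer script writes at the end of a
# run, e.g. {"eval_accuracy": 0.79, "train_loss": 0.41} (values illustrative only).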
def a_ ( ) -> Union[str, Any]:
__snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda'
return is_using_cuda and is_apex_available()
A__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
@classmethod
def A_ ( cls : Any ) -> List[str]:
'''simple docstring'''
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__snake_case : Optional[int] = tempfile.mkdtemp()
__snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
__snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def A_ ( cls : List[str] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : List[Any] = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model, and it would need drop_last to work.
return
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : int = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
# with so little data, distributed training needs more epochs to get the score on par with 0/1-GPU runs
__snake_case : Any = 7 if get_gpu_count() > 1 else 2
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : Tuple = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : Any = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : int = get_results(__a )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) )
@slow
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
__snake_case : List[str] = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : Optional[int] = get_results(__a )
# The base model scores about 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
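# --- Added example (not part of the original test file) ---
# Outside the TestCasePlus harness, the same pattern is just: build argv, launch the
# script, then parse all_results.json. A hedged sketch (paths and flags illustrative):
import json
import subprocess

glue_args = (
    'accelerate launch examples/pytorch/text-classification/run_glue_no_trainer.py '
    '--model_name_or_path distilbert-base-uncased --output_dir /tmp/mrpc '
    '--train_file ./tests/fixtures/tests_samples/MRPC/train.csv '
    '--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv'
).split()
subprocess.run(glue_args, check=True)
with open('/tmp/mrpc/all_results.json') as fh:
    print(json.load(fh))  # e.g. {"eval_accuracy": ..., "train_loss": ...}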
| 0 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , a : Dict , a : Tuple=3 , a : Any=32 , a : str=3 , a : Optional[int]=10 , a : Union[str, Any]=[10, 20, 30, 40] , a : Any=[1, 1, 2, 1] , a : str=True , a : Dict=True , a : Optional[Any]="relu" , a : List[Any]=3 , a : Any=None , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : List[str] = embeddings_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_sizes
SCREAMING_SNAKE_CASE : Tuple = depths
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Tuple = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = num_labels
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Any = len(a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __UpperCamelCase ( self : Optional[Any] , a : List[str] , a : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = FlaxRegNetModel(config=a )
SCREAMING_SNAKE_CASE : List[str] = model(a )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : List[Any] , a : List[Any] , a : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = FlaxRegNetForImageClassification(config=a )
SCREAMING_SNAKE_CASE : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = config_and_inputs
SCREAMING_SNAKE_CASE : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : Dict ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=a , has_text_modality=a )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(a )
SCREAMING_SNAKE_CASE : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(a : List[str] , a : Tuple , a : Optional[int] ):
SCREAMING_SNAKE_CASE : Dict = model_class(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : int = self.model_tester.num_stages
self.assertEqual(len(a ) , expected_num_stages + 1 )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(a , a , a )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(a )
@jax.jit
def model_jitted(a : Optional[int] , **a : str ):
return model(pixel_values=a , **a )
with self.subTest("JIT Enabled" ):
SCREAMING_SNAKE_CASE : str = model_jitted(**a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Union[str, Any] = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=a , return_tensors="np" )
SCREAMING_SNAKE_CASE : int = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE : str = (1, 1000)
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE : Any = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
 | 76 |
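# --- Added example (not part of the original test file) ---
# A minimal sketch of the FlaxRegNet classification flow tested above, using the
# "facebook/regnet-y-040" checkpoint named in that snippet.
from PIL import Image
from transformers import AutoImageProcessor, FlaxRegNetForImageClassification

regnet_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
regnet_model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
regnet_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
regnet_inputs = regnet_processor(images=regnet_image, return_tensors="np")
regnet_logits = regnet_model(**regnet_inputs).logits  # (1, 1000)
print(int(regnet_logits.argmax(-1)[0]))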
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {"vocab_file": "spiece.model"}
lowercase = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
lowercase = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = []
def __init__( self , a , a="<unk>" , a="<s>" , a="</s>" , a="<pad>" , a="[SEP]" , a="[MASK]" , a="[CLS]" , a = None , **a , ) -> None:
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
# The mask token behaves like a normal word, i.e. it includes the space before it
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , sep_token=a , mask_token=a , cls_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
@property
def _UpperCamelCase ( self ) -> Tuple:
return self.sp_model.get_piece_size()
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self , a ) -> Optional[Any]:
snake_case_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self , a ) -> List[str]:
return self.sp_model.encode(a , out_type=a )
def _UpperCamelCase ( self , a ) -> Dict:
return self.sp_model.piece_to_id(a )
def _UpperCamelCase ( self , a ) -> Union[str, Any]:
snake_case_ = self.sp_model.IdToPiece(a )
return token
def _UpperCamelCase ( self , a ) -> List[Any]:
snake_case_ = []
snake_case_ = ''
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(a )
snake_case_ = False
out_string += self.sp_model.decode(a )
return out_string.strip()
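# Illustrative behaviour (not from the file): sentencepiece pieces such as
# ["▁I", "▁saw", "▁a", "▁girl"] decode to "I saw a girl", while special tokens
# like "[SEP]" are passed through verbatim rather than run through the model.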
def _UpperCamelCase ( self , a , a = False , a = None , a = True , **a , ) -> str:
snake_case_ = kwargs.pop('use_source_tokenizer' , a )
snake_case_ = self.convert_ids_to_tokens(a , skip_special_tokens=a )
# To avoid mixing byte-level and unicode for byte-level BPE,
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ = []
snake_case_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a ) )
snake_case_ = []
sub_texts.append(a )
else:
current_sub_text.append(a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case_ = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(a ) )
else:
snake_case_ = ''.join(a )
snake_case_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ = self.clean_up_tokenization(a )
return clean_text
else:
return text
def _UpperCamelCase ( self , a , a = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def _UpperCamelCase ( self , a , a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCamelCase ( self , a , a = None , a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def _UpperCamelCase ( self , a , a = None ) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
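# --- Added example (not part of the original test file) ---
# A hedged sketch of the special-token layout the three methods above implement,
# using the 'google/bigbird-roberta-base' checkpoint named in this file.
from transformers import BigBirdTokenizer

bigbird_tok = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
pair = bigbird_tok('first segment', 'second segment')
# single sequence: [CLS] A [SEP]          -> token_type_ids are all 0
# sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over the A part, 1s over the B part
print(pair['input_ids'][:3], pair['token_type_ids'])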
| 178 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Dict = (DEISMultistepScheduler,)
snake_case__ : Tuple = (("""num_inference_steps""", 2_5),)
def _A ( self : Union[str, Any] , **__lowerCamelCase : Tuple ):
UpperCamelCase :Optional[int] = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**__lowerCamelCase )
return config
def _A ( self : List[str] , __lowerCamelCase : Dict=0 , **__lowerCamelCase : Tuple ):
UpperCamelCase :List[Any] = dict(self.forward_default_kwargs )
UpperCamelCase :Dict = kwargs.pop("""num_inference_steps""" , __lowerCamelCase )
UpperCamelCase :Dict = self.dummy_sample
UpperCamelCase :Any = 0.1 * sample
UpperCamelCase :Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase :Optional[int] = self.get_scheduler_config(**__lowerCamelCase )
UpperCamelCase :List[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
UpperCamelCase :Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
UpperCamelCase :Dict = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
UpperCamelCase :Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase :str = sample, sample
for t in range(__lowerCamelCase , time_step + scheduler.config.solver_order + 1 ):
UpperCamelCase :Optional[Any] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
UpperCamelCase :Optional[int] = new_scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _A ( self : Optional[int] ):
pass
def _A ( self : str , __lowerCamelCase : Any=0 , **__lowerCamelCase : Optional[int] ):
UpperCamelCase :Dict = dict(self.forward_default_kwargs )
UpperCamelCase :Union[str, Any] = kwargs.pop("""num_inference_steps""" , __lowerCamelCase )
UpperCamelCase :Optional[Any] = self.dummy_sample
UpperCamelCase :List[Any] = 0.1 * sample
UpperCamelCase :Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase :Any = self.get_scheduler_config()
UpperCamelCase :str = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase :Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
UpperCamelCase :Optional[Any] = scheduler_class.from_pretrained(__lowerCamelCase )
# set timesteps on the reloaded scheduler
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase :List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase :Tuple = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
UpperCamelCase :Tuple = new_scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # if no scheduler is passed, build the default one from the test config
        # (previously the passed-in scheduler was unconditionally recreated, which
        # defeated the purpose of the `scheduler` argument)
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
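
    # DEIS, DPMSolver (single- and multi-step), and UniPC share a compatible
    # config format, so from_config can convert between them; converting through
    # the full cycle back to DEIS should leave the full-loop result unchanged.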
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # the scheduler must preserve the half-precision dtype of the input sample
        assert sample.dtype == torch.float16
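
    # A minimal usage sketch of the scheduler pattern exercised above (illustrative
    # only; `model` stands in for a hypothetical noise-prediction callable and is
    # not part of this test suite):
    #
    #     scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    #     scheduler.set_timesteps(10)
    #     sample = torch.randn(1, 3, 8, 8)
    #     for t in scheduler.timesteps:
    #         residual = model(sample, t)
    #         sample = scheduler.step(residual, t, sample).prev_sample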
| 358 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
            in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37,
            layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
            pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
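
    # At the time this test was written, torch.Generator did not support the "mps"
    # device, so on Apple Silicon the inputs fall back to seeding the global RNG.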
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
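
    # Passing precomputed prompt_embeds should be equivalent to passing the raw
    # prompt: the test below encodes the prompt manually with the pipeline's
    # tokenizer and text encoder and compares output slices from both paths.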
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])

        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
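
    # Inputs use fixed latents drawn from a seeded NumPy RNG so that full-model
    # runs are reproducible across executions and machines.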
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
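

# A minimal usage sketch of the pipeline under test (illustrative only; the
# checkpoint name matches the one used in the slow tests above):
#
#     pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#     out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
#     rgb, depth = out.rgb, out.depth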
| 62 | 0 |