from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192,
        d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0,
        num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0,
        layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0,
        router_dtype="float32", router_ignore_padding_tokens=False,
        output_hidden_states=False, output_attentions=False, initializer_factor=0.002,
        output_router_logits=False, use_cache=True,
        separator_token_id=35998, pad_token_id=35995, eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
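
# Usage sketch (illustrative only): a minimal smoke test of the configuration
# defined above, assuming `PretrainedConfig.attribute_map` resolves
# `hidden_size` to `d_model` as in current transformers releases.
if __name__ == "__main__":
    config = GPTSanJapaneseConfig()
    print(config.vocab_size)   # 36000
    print(config.num_layers)   # num_switch_layers + num_ext_layers = 10
    print(config.hidden_size)  # should resolve to d_model (1024) through attribute_map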
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture."""
    config = BertAbsConfig(
        temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False,
        encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8,
        enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8,
        dec_ff_size=2048, dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
cluster = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
cluster = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
"""simple docstring"""
def UpperCAmelCase_ ( __a : Dict , __a : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = ''
for i in table:
res += inp[i - 1]
return res
def UpperCAmelCase_ ( __a : List[Any] ):
'''simple docstring'''
return data[1:] + data[0]
def UpperCAmelCase_ ( __a : Any , __a : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = ''
for i in range(len(__a ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCAmelCase_ ( __a : List[Any] , __a : str ):
'''simple docstring'''
_lowerCamelCase : str = int('0b' + data[0] + data[-1] , 2 )
_lowerCamelCase : Optional[int] = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def UpperCAmelCase_ ( __a : Dict , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = message[:4]
_lowerCamelCase : Optional[Any] = message[4:]
_lowerCamelCase : Union[str, Any] = apply_table(__a , __a )
_lowerCamelCase : int = xor(__a , __a )
_lowerCamelCase : str = apply_sbox(__a , temp[:4] ) # noqa: E741
_lowerCamelCase : Any = apply_sbox(__a , temp[4:] )
_lowerCamelCase : Dict = '0' * (2 - len(__a )) + l # noqa: E741
_lowerCamelCase : Optional[Any] = '0' * (2 - len(__a )) + r
_lowerCamelCase : Tuple = apply_table(l + r , __a )
_lowerCamelCase : Tuple = xor(__a , __a )
return temp + right
if __name__ == "__main__":
a_ = input("""Enter 10 bit key: """)
a_ = input("""Enter 8 bit message: """)
a_ = [6, 3, 7, 4, 8, 5, 10, 9]
a_ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
a_ = [2, 4, 3, 1]
a_ = [2, 6, 3, 1, 4, 8, 5, 7]
a_ = [4, 1, 3, 5, 7, 2, 8, 6]
a_ = [4, 1, 2, 3, 2, 3, 4, 1]
a_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
a_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
a_ = apply_table(key, paa_table)
a_ = temp[:5]
a_ = temp[5:]
a_ = left_shift(left)
a_ = left_shift(right)
a_ = apply_table(left + right, pa_table)
a_ = left_shift(left)
a_ = left_shift(right)
a_ = left_shift(left)
a_ = left_shift(right)
a_ = apply_table(left + right, pa_table)
# encryption
a_ = apply_table(message, IP)
a_ = function(expansion, sa, sa, keya, temp)
a_ = temp[4:] + temp[:4]
a_ = function(expansion, sa, sa, keya, temp)
a_ = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
a_ = apply_table(CT, IP)
a_ = function(expansion, sa, sa, keya, temp)
a_ = temp[4:] + temp[:4]
a_ = function(expansion, sa, sa, keya, temp)
a_ = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])


# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase__ = get_logger(__name__)
class UpperCamelCase :
__UpperCamelCase = """dummy_data"""
__UpperCamelCase = """datasets"""
__UpperCamelCase = False
def __init__( self : Dict ,_lowerCAmelCase : str ,_lowerCAmelCase : str ,_lowerCAmelCase : Union[Version, str] ,_lowerCAmelCase : Optional[str] = None ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : bool = True ,_lowerCAmelCase : Optional[List[Callable]] = None ,):
"""simple docstring"""
__snake_case = 0
__snake_case = dataset_name
__snake_case = cache_dir
__snake_case = use_local_dummy_data
__snake_case = config
# download_callbacks take a single url as input
__snake_case = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__snake_case = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__snake_case = str(_lowerCAmelCase )
# to be downloaded
__snake_case = None
__snake_case = None
@property
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
if self._dummy_file is None:
__snake_case = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def UpperCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def UpperCamelCase_ ( self : List[str] ):
"""simple docstring"""
__snake_case = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__snake_case = cached_path(
_lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=_lowerCAmelCase ,force_extract=_lowerCAmelCase )
return os.path.join(_lowerCAmelCase ,self.dummy_file_name )
@property
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
if self._bucket_url is None:
__snake_case = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : List[Any] ,*_lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__snake_case = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__snake_case = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
return self.create_dummy_data_dict(_lowerCAmelCase ,_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(_lowerCAmelCase ,_lowerCAmelCase )
else:
return self.create_dummy_data_single(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self : Dict ,_lowerCAmelCase : str ,*_lowerCAmelCase : Dict ):
"""simple docstring"""
return self.download_and_extract(_lowerCAmelCase )
def UpperCamelCase_ ( self : int ,_lowerCAmelCase : Optional[int] ,_lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return self.download_and_extract(_lowerCAmelCase )
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : str ,*_lowerCAmelCase : List[str] ,**_lowerCAmelCase : List[Any] ):
"""simple docstring"""
return path
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return {}
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : int ,_lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__snake_case = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
for single_url in single_urls:
download_callback(_lowerCAmelCase )
else:
__snake_case = single_urls
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
__snake_case = [os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(Path(_lowerCAmelCase ).name ) ) for x in single_urls]
else:
__snake_case = single_urls
__snake_case = os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(Path(_lowerCAmelCase ).name ) )
__snake_case = value
# make sure that values are unique
if all(isinstance(_lowerCAmelCase ,_lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__snake_case = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : str ,_lowerCAmelCase : Any ):
"""simple docstring"""
__snake_case = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__snake_case = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,_lowerCAmelCase ) ) for url in data_url )
__snake_case = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__snake_case = [data_url[0]] * len(_lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case = os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(_lowerCAmelCase )
return dummy_data_list
def UpperCamelCase_ ( self : Tuple ,_lowerCAmelCase : Any ,_lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case = os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(_lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase_ ( self : Any ):
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Tuple ,_lowerCAmelCase : List[str] ):
"""simple docstring"""
def _iter_archive_members(_lowerCAmelCase : Tuple ):
# this preserves the order of the members inside the ZIP archive
__snake_case = Path(self.dummy_file ).parent
__snake_case = path.relative_to(_lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__snake_case = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_lowerCAmelCase )
__snake_case = Path(_lowerCAmelCase )
__snake_case = _iter_archive_members(_lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(_lowerCAmelCase ).as_posix(), file_path.open("rb" )
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : Tuple ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
__snake_case = [paths]
for path in paths:
if os.path.isfile(_lowerCAmelCase ):
if os.path.basename(_lowerCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_lowerCAmelCase ):
if os.path.basename(_lowerCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(_lowerCAmelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
def solution() -> int:
    """Return a * b * c for the Pythagorean triplet (a, b, c) with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
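
# Sanity-check sketch (illustrative): the qualifying triplet is widely quoted
# as (200, 375, 425), whose product is 31875000; call _check_solution() to verify.
def _check_solution() -> None:
    a, b, c = 200, 375, 425
    assert a + b + c == 1000 and a * a + b * b == c * c
    assert solution() == a * b * c  # 31875000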
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__lowerCamelCase = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__lowerCamelCase = '''UperNetConfig'''
class a__ ( nn.Module ):
def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Union[int, Tuple[int, int]] , lowerCamelCase_ : Union[int, Tuple[int, int], str] = 0 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
a_ : str = nn.Convad(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=lowerCamelCase_ , padding=lowerCamelCase_ , bias=lowerCamelCase_ , dilation=lowerCamelCase_ , )
a_ : Dict = nn.BatchNormad(lowerCamelCase_ )
a_ : Union[str, Any] = nn.ReLU()
def UpperCAmelCase( self : str , lowerCamelCase_ : torch.Tensor ):
a_ : Tuple = self.conv(lowerCamelCase_ )
a_ : int = self.batch_norm(lowerCamelCase_ )
a_ : Optional[int] = self.activation(lowerCamelCase_ )
return output
class a__ ( nn.Module ):
def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
super().__init__()
a_ : Union[str, Any] = [
nn.AdaptiveAvgPoolad(lowerCamelCase_ ),
UperNetConvModule(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(lowerCamelCase_ ) , lowerCamelCase_ )
def UpperCAmelCase( self : List[Any] , lowerCamelCase_ : torch.Tensor ):
a_ : Optional[int] = input
for layer in self.layers:
a_ : Dict = layer(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
def __init__( self : Union[str, Any] , lowerCamelCase_ : Tuple[int, ...] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : bool ):
super().__init__()
a_ : Optional[int] = pool_scales
a_ : List[Any] = align_corners
a_ : Union[str, Any] = in_channels
a_ : List[str] = channels
a_ : Optional[int] = []
for i, pool_scale in enumerate(lowerCamelCase_ ):
a_ : Optional[int] = UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase_ , in_channels=lowerCamelCase_ , channels=lowerCamelCase_ )
self.blocks.append(lowerCamelCase_ )
self.add_module(str(lowerCamelCase_ ) , lowerCamelCase_ )
def UpperCAmelCase( self : List[str] , lowerCamelCase_ : torch.Tensor ):
a_ : List[str] = []
for ppm in self.blocks:
a_ : Any = ppm(lowerCamelCase_ )
a_ : str = nn.functional.interpolate(
lowerCamelCase_ , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(lowerCamelCase_ )
return ppm_outs
class a__ ( nn.Module ):
def __init__( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int ):
super().__init__()
a_ : Tuple = config
a_ : int = config.pool_scales # e.g. (1, 2, 3, 6)
a_ : str = in_channels
a_ : str = config.hidden_size
a_ : int = False
a_ : int = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
a_ : Optional[int] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
a_ : int = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
a_ : List[Any] = nn.ModuleList()
a_ : Optional[int] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
a_ : List[str] = UperNetConvModule(lowerCamelCase_ , self.channels , kernel_size=1 )
a_ : Union[str, Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(lowerCamelCase_ )
self.fpn_convs.append(lowerCamelCase_ )
a_ : str = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def UpperCAmelCase( self : str ):
self.apply(self._init_weights )
def UpperCAmelCase( self : Any , lowerCamelCase_ : Union[str, Any] ):
if isinstance(lowerCamelCase_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase( self : Tuple , lowerCamelCase_ : Optional[int] ):
a_ : Optional[int] = inputs[-1]
a_ : List[str] = [x]
psp_outs.extend(self.psp_modules(lowerCamelCase_ ) )
a_ : int = torch.cat(lowerCamelCase_ , dim=1 )
a_ : Tuple = self.bottleneck(lowerCamelCase_ )
return output
def UpperCAmelCase( self : List[str] , lowerCamelCase_ : torch.Tensor ):
# build laterals
a_ : List[Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(lowerCamelCase_ ) )
# build top-down path
a_ : Tuple = len(lowerCamelCase_ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a_ : List[Any] = laterals[i - 1].shape[2:]
a_ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=lowerCamelCase_ , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
a_ : int = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a_ : Union[str, Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
a_ : Optional[Any] = torch.cat(lowerCamelCase_ , dim=1 )
a_ : Tuple = self.fpn_bottleneck(lowerCamelCase_ )
a_ : str = self.classifier(lowerCamelCase_ )
return output
class a__ ( nn.Module ):
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 3 , lowerCamelCase_ : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
a_ : Union[str, Any] = config
a_ : str = config.auxiliary_in_channels
a_ : List[str] = config.auxiliary_channels
a_ : List[Any] = config.auxiliary_num_convs
a_ : Optional[Any] = config.auxiliary_concat_input
a_ : str = in_index
a_ : str = (kernel_size // 2) * dilation
a_ : Union[str, Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=lowerCamelCase_ , padding=lowerCamelCase_ , dilation=lowerCamelCase_ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=lowerCamelCase_ , padding=lowerCamelCase_ , dilation=lowerCamelCase_ ) )
if self.num_convs == 0:
a_ : Dict = nn.Identity()
else:
a_ : Optional[Any] = nn.Sequential(*lowerCamelCase_ )
if self.concat_input:
a_ : List[str] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=lowerCamelCase_ , padding=kernel_size // 2 )
a_ : str = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def UpperCAmelCase( self : Optional[int] ):
self.apply(self._init_weights )
def UpperCAmelCase( self : List[str] , lowerCamelCase_ : Dict ):
if isinstance(lowerCamelCase_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase( self : Any , lowerCamelCase_ : torch.Tensor ):
# just take the relevant feature maps
a_ : List[Any] = encoder_hidden_states[self.in_index]
a_ : Any = self.convs(lowerCamelCase_ )
if self.concat_input:
a_ : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
a_ : Union[str, Any] = self.classifier(lowerCamelCase_ )
return output
class a__ ( lowerCAmelCase_ ):
lowerCamelCase__: Tuple = UperNetConfig
lowerCamelCase__: Tuple = """pixel_values"""
lowerCamelCase__: Optional[Any] = True
def UpperCAmelCase( self : Optional[int] , lowerCamelCase_ : str ):
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase( self : List[str] ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase( self : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str=False ):
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
a_ : List[Any] = value
__lowerCamelCase = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowerCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , lowerCAmelCase_ , )
class a__ ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] ):
super().__init__(lowerCamelCase_ )
a_ : str = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
a_ : List[Any] = UperNetHead(lowerCamelCase_ , in_channels=self.backbone.channels )
a_ : List[str] = UperNetFCNHead(lowerCamelCase_ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , ):
a_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
a_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
a_ : List[Any] = self.backbone.forward_with_filtered_kwargs(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , output_attentions=lowerCamelCase_ )
a_ : str = outputs.feature_maps
a_ : Tuple = self.decode_head(lowerCamelCase_ )
a_ : Optional[Any] = nn.functional.interpolate(lowerCamelCase_ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=lowerCamelCase_ )
a_ : List[Any] = None
if self.auxiliary_head is not None:
a_ : Dict = self.auxiliary_head(lowerCamelCase_ )
a_ : List[Any] = nn.functional.interpolate(
lowerCamelCase_ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=lowerCamelCase_ )
a_ : Dict = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
a_ : int = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
a_ : List[Any] = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
a_ : Optional[int] = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
a_ : Tuple = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
a_ : str = (logits,) + outputs[1:]
else:
a_ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=None , lowercase=None ) -> List[Any]:
lowerCamelCase_ = data
lowerCamelCase_ = previous
lowerCamelCase_ = next_node
def __str__( self ) -> str:
return f'{self.data}'
def SCREAMING_SNAKE_CASE_( self ) -> int:
return self.data
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
return self.next
def SCREAMING_SNAKE_CASE_( self ) -> int:
return self.previous
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase ) -> Optional[int]:
lowerCamelCase_ = head
def __iter__( self ) -> int:
return self
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
if not self.current:
raise StopIteration
else:
lowerCamelCase_ = self.current.get_data()
lowerCamelCase_ = self.current.get_next()
return value
class _SCREAMING_SNAKE_CASE :
def __init__( self ) -> Union[str, Any]:
lowerCamelCase_ = None # First node in list
lowerCamelCase_ = None # Last node in list
def __str__( self ) -> Optional[Any]:
lowerCamelCase_ = self.head
lowerCamelCase_ = []
while current is not None:
nodes.append(current.get_data() )
lowerCamelCase_ = current.get_next()
return " ".join(str(lowercase ) for node in nodes )
def __contains__( self , lowercase ) -> Optional[int]:
lowerCamelCase_ = self.head
while current:
if current.get_data() == value:
return True
lowerCamelCase_ = current.get_next()
return False
def __iter__( self ) -> List[str]:
return LinkedListIterator(self.head )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.head:
return self.head.get_data()
return None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
if self.tail:
return self.tail.get_data()
return None
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
if self.head is None:
lowerCamelCase_ = node
lowerCamelCase_ = node
else:
self.insert_before_node(self.head , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
if self.head is None:
self.set_head(lowercase )
else:
self.insert_after_node(self.tail , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = Node(lowercase )
if self.head is None:
self.set_head(lowercase )
else:
self.set_tail(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> None:
lowerCamelCase_ = node
lowerCamelCase_ = node.previous
if node.get_previous() is None:
lowerCamelCase_ = node_to_insert
else:
lowerCamelCase_ = node_to_insert
lowerCamelCase_ = node_to_insert
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> None:
lowerCamelCase_ = node
lowerCamelCase_ = node.next
if node.get_next() is None:
lowerCamelCase_ = node_to_insert
else:
lowerCamelCase_ = node_to_insert
lowerCamelCase_ = node_to_insert
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> None:
lowerCamelCase_ = 1
lowerCamelCase_ = Node(lowercase )
lowerCamelCase_ = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase , lowercase )
return
current_position += 1
lowerCamelCase_ = node.next
self.insert_after_node(self.tail , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Node:
lowerCamelCase_ = self.head
while node:
if node.get_data() == item:
return node
lowerCamelCase_ = node.get_next()
raise Exception("Node not found" )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
if (node := self.get_node(lowercase )) is not None:
if node == self.head:
lowerCamelCase_ = self.head.get_next()
if node == self.tail:
lowerCamelCase_ = self.tail.get_previous()
self.remove_node_pointers(lowercase )
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> None:
if node.get_next():
lowerCamelCase_ = node.previous
if node.get_previous():
lowerCamelCase_ = node.next
lowerCamelCase_ = None
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> int:
return self.head is None
def lowerCamelCase_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowercase = '''\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'''
class UpperCAmelCase ( __a):
'''simple docstring'''
@staticmethod
def lowercase_ ( lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="Model's type.")
train_parser.add_argument(
"--tf_checkpoint" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="TensorFlow checkpoint path or folder.")
train_parser.add_argument(
"--pytorch_dump_output" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="Path to the PyTorch saved model output.")
train_parser.add_argument("--config" , type=_SCREAMING_SNAKE_CASE , default="" , help="Configuration file path or folder.")
train_parser.add_argument(
"--finetuning_task_name" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=_SCREAMING_SNAKE_CASE)
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ , ) -> Optional[Any]:
"""simple docstring"""
a_ =logging.get_logger("transformers-cli/converting")
self._logger.info(f"""Loading model {model_type}""")
a_ =model_type
a_ =tf_checkpoint
a_ =pytorch_dump_output
a_ =config
a_ =finetuning_task_name
def lowercase_ ( self) -> Dict:
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE)
if "ckpt" in self._tf_checkpoint.lower():
a_ =self._tf_checkpoint
a_ =""
else:
a_ =self._tf_checkpoint
a_ =""
convert_transfo_xl_checkpoint_to_pytorch(
_SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , _SCREAMING_SNAKE_CASE)
elif self._model_type == "gpt2":
try:
from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
    convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE)
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]")
"""Project Euler 99: find the line in base_exp.txt whose base**exponent is largest.

Comparing exponent * log10(base) avoids computing the huge powers directly.
"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A = True
except ImportError:
A = False
A = logging.get_logger(__name__) # pylint: disable=invalid-name
def a(lowercase__ ):
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
snake_case_ = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' , type=lowerCamelCase__ , help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' , type=lowerCamelCase__ , help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , *__UpperCamelCase ):
"""simple docstring"""
snake_case_ = testing
snake_case_ = testing_file
snake_case_ = path
def __lowerCAmelCase ( self ):
"""simple docstring"""
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case_ = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
snake_case_ = (
Path(lowerCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case_ = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCamelCase__ ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
snake_case_ = json.load(lowerCamelCase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCamelCase__ , extra_context=lowerCamelCase__ , )
snake_case_ = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
snake_case_ = json.load(lowerCamelCase__ )
snake_case_ = configuration['''lowercase_modelname''']
snake_case_ = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
snake_case_ = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
snake_case_ = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
snake_case_ = '''Flax''' in generate_tensorflow_pytorch_and_flax
snake_case_ = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=lowerCamelCase__ )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__UpperCamelCase ):
with open(lowerCamelCase__ , 'r' ) as f:
snake_case_ = f.readlines()
with open(lowerCamelCase__ , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCamelCase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# Create temp file
snake_case_ = mkstemp()
snake_case_ = False
with fdopen(lowerCamelCase__ , 'w' ) as new_file:
with open(lowerCamelCase__ ) as old_file:
for line in old_file:
new_file.write(lowerCamelCase__ )
if line_to_copy_below in line:
snake_case_ = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCamelCase__ )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(lowerCamelCase__ , lowerCamelCase__ )
# Remove original file
remove(lowerCamelCase__ )
# Move new file
move(lowerCamelCase__ , lowerCamelCase__ )
def skip_units(__UpperCamelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__UpperCamelCase ):
with open(lowerCamelCase__ ) as datafile:
snake_case_ = []
snake_case_ = False
snake_case_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case_ = line.split('"' )[1]
snake_case_ = skip_units(lowerCamelCase__ )
elif "# Below: " in line and "##" not in line:
snake_case_ = line.split('"' )[1]
snake_case_ = skip_units(lowerCamelCase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
snake_case_ = []
elif "# Replace with" in line and "##" not in line:
snake_case_ = []
elif "##" not in line:
lines_to_copy.append(lowerCamelCase__ )
remove(lowerCamelCase__ )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(lowerCamelCase__ )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
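# Illustrative sketch (added by the editor): `_LazyModule` defers the heavy torch imports
# above until an attribute is first requested. A stripped-down equivalent built on PEP 562
# module-level `__getattr__` could look like the commented lines below; the helper name and
# structure are assumptions, not part of this module.
#
# import importlib
#
# def __getattr__(name):
#     for submodule, exported_names in _import_structure.items():
#         if name in exported_names:
#             module = importlib.import_module(f"{__name__}.{submodule}")
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")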
| 348 | 0 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
__A : Union[str, Any] = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
__A : Union[str, Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None ):
    '''simple docstring'''
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str , organization: Optional[str] = None , token: Optional[str] = None ):
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(_SCREAMING_SNAKE_CASE , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
_UpperCAmelCase = args.hub_token if hasattr(_SCREAMING_SNAKE_CASE , '''hub_token''' ) else None
_UpperCAmelCase = get_full_repo_name(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_SCREAMING_SNAKE_CASE , model_name=_SCREAMING_SNAKE_CASE , repo_name=_SCREAMING_SNAKE_CASE , dataset_name=args.dataset_name if hasattr(_SCREAMING_SNAKE_CASE , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_SCREAMING_SNAKE_CASE , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(_SCREAMING_SNAKE_CASE , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(_SCREAMING_SNAKE_CASE , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_SCREAMING_SNAKE_CASE , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(_SCREAMING_SNAKE_CASE , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(_SCREAMING_SNAKE_CASE , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_SCREAMING_SNAKE_CASE , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_SCREAMING_SNAKE_CASE , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(_SCREAMING_SNAKE_CASE , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(_SCREAMING_SNAKE_CASE , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
_UpperCAmelCase = os.path.join(args.output_dir , '''README.md''' )
model_card.save(_SCREAMING_SNAKE_CASE )
def extract_commit_hash(resolved_file: Optional[str] , commit_hash: Optional[str] = None ):
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
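# Hedged example (added by the editor): for a cached file resolved to a path such as
#   ~/.cache/huggingface/diffusers/models--user--repo/snapshots/0123456789abcdef0123456789abcdef01234567/unet/config.json
# the regex above captures the 40-character snapshot folder name, and it is returned only
# if it matches REGEX_COMMIT_HASH. The repo id and file name here are made up.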
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None ):
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"the directory exists and can be written to."
)
def _add_variant(weights_name: str , variant: Optional[str] = None ):
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
return weights_name
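# Hedged example (added by the editor) of the renaming performed above; file names are illustrative:
#   _add_variant("diffusion_pytorch_model.bin", "fp16")  -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin", None)    -> "diffusion_pytorch_model.bin" (unchanged)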
def lowercase ( _SCREAMING_SNAKE_CASE : str , *,
_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any]=None , ):
'''simple docstring'''
_UpperCAmelCase = str(_SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
return pretrained_model_name_or_path
elif os.path.isdir(_SCREAMING_SNAKE_CASE ):
if os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
# Load from a PyTorch checkpoint
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_SCREAMING_SNAKE_CASE ).base_version ) >= version.parse('''0.20.0''' )
):
try:
_UpperCAmelCase = hf_hub_download(
_SCREAMING_SNAKE_CASE , filename=_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
warnings.warn(
f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , _SCREAMING_SNAKE_CASE , )
return model_file
except: # noqa: E722
warnings.warn(
f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}\' so that the correct variant file can be added.' , _SCREAMING_SNAKE_CASE , )
try:
# 2. Load model file as usual
_UpperCAmelCase = hf_hub_download(
_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
'''this model name. Check the model page at '''
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
| 95 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _a ( AbstractDatasetReader):
"""simple docstring"""
def __init__( self : List[str] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Optional[Any] , )->Any:
super().__init__(
__UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths}
_UpperCAmelCase = Text(
cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , **__UpperCamelCase , )
def lowercase__ ( self : Optional[Any] )->str:
# Build iterable dataset
if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
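# Hedged usage sketch (added by the editor, not executed here): this reader backs the
# public `load_dataset("text", ...)` path of the `datasets` library. The file names below
# are made up for illustration.
#
# from datasets import load_dataset
#
# train_ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
# streamed = load_dataset("text", data_files={"train": "corpus.txt"}, split="train", streaming=True)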
| 95 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class a (Generic[_T] ):
"""simple docstring"""
    def __init__( self , iterable : Iterable[_T] | None = None ) -> None:
        self._stack1 : list[_T] = list(iterable or [] )
        self._stack2 : list[_T] = []
    def __len__( self ) -> int:
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__( self ) -> str:
        return F'Queue({tuple(self._stack2[::-1] + self._stack1 )})'
    def put( self , item : _T ) -> None:
        self._stack1.append(item )
    def get( self ) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError("Queue is empty" )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
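    # Hedged demo added by the editor: exercises the two-stack queue above. Items are
    # moved from the inbox stack to the outbox stack only when the outbox is empty,
    # which keeps put/get amortised O(1) while preserving FIFO order.
    demo_queue = a([10, 20, 30])
    demo_queue.put(40)
    assert demo_queue.get() == 10
    assert demo_queue.get() == 20
    assert len(demo_queue) == 2
    print(demo_queue)  # Queue((30, 40))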
| 81 |
from collections import deque
class lowerCAmelCase_ :
    def __init__( self , process_name : str , arrival_time : int , burst_time : int ):
        self.process_name = process_name # process name
        self.arrival_time = arrival_time # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time # remaining burst time
        self.waiting_time = 0 # total time of the process wait in ready queue
        self.turnaround_time = 0 # time from arrival time to completion time
class lowerCAmelCase_ :
    def __init__( self , number_of_queues : int , time_slices : list[int] , queue : deque[Process] , current_time : int , ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : list[Process] ):
lowerCAmelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ):
lowerCAmelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : list[Process] ):
lowerCAmelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : deque[Process] ):
return [q.burst_time for q in queue]
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : deque[Process] ):
lowerCAmelCase__ = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE_ ) != 0:
lowerCAmelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowerCAmelCase__ = 0
# set the process's turnaround time because it is finished
lowerCAmelCase__ = self.current_time - cp.arrival_time
# set the completion time
lowerCAmelCase__ = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCAmelCase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCAmelCase__ = 0
# set the finish time
lowerCAmelCase__ = self.current_time
# update the process' turnaround time because it is finished
lowerCAmelCase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __snake_case ( self : int ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_UpperCAmelCase : List[Any] = Process("P1", 0, 53)
_UpperCAmelCase : Tuple = Process("P2", 0, 17)
_UpperCAmelCase : int = Process("P3", 0, 68)
_UpperCAmelCase : str = Process("P4", 0, 24)
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : List[Any] = [17, 25]
_UpperCAmelCase : Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
_UpperCAmelCase : Tuple = Process("P1", 0, 53)
_UpperCAmelCase : List[str] = Process("P2", 0, 17)
_UpperCAmelCase : Any = Process("P3", 0, 68)
_UpperCAmelCase : List[Any] = Process("P4", 0, 24)
_UpperCAmelCase : Optional[int] = 3
_UpperCAmelCase : int = [17, 25]
_UpperCAmelCase : str = deque([Pa, Pa, Pa, Pa])
_UpperCAmelCase : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
_UpperCAmelCase : int = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 668 | 0 |
from __future__ import annotations
from typing import Any
class __a :
'''simple docstring'''
    def __init__( self , num_of_nodes : int ) -> None:
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ) -> None:
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ) -> int:
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ) -> None:
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list[int] , u_node : int , v_node : int ) -> None:
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
def _a ( self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : str = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : Any = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = edge
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case_ , snake_case_ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = edge
SCREAMING_SNAKE_CASE__ : List[Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case_ , snake_case_ , snake_case_ )
print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : Tuple = [-1] * self.m_num_of_nodes
print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def _lowercase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
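# Hedged cross-check added by the editor (independent of the class above): a plain
# Kruskal + union-find computation of the minimum-spanning-tree weight for a tiny graph,
# i.e. the value a correct Boruvka run over the same edges should also report.
def _kruskal_mst_weight(num_nodes: int, edges: list[tuple[int, int, int]]) -> int:
    parent = list(range(num_nodes))

    def find(node: int) -> int:
        while parent[node] != node:
            parent[node] = parent[parent[node]]  # path halving
            node = parent[node]
        return node

    total = 0
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            total += weight
    return total


if __name__ == "__main__":
    # 4-node example graph; the expected MST weight is 1 + 2 + 3 = 6.
    assert _kruskal_mst_weight(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 10), (1, 3, 4)]) == 6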
| 719 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __a (AbstractDatasetInputStream):
'''simple docstring'''
def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
SCREAMING_SNAKE_CASE__ : List[Any] = Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def _a ( self ) -> int:
"""simple docstring"""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="""train""" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Any:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
SCREAMING_SNAKE_CASE__ : int = dataset
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Optional[Any] = con
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : int = num_proc
SCREAMING_SNAKE_CASE__ : int = to_sql_kwargs
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.to_sql_kwargs.pop("""sql""" , _a )
SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""con""" , _a )
SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""index""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(index=_a , **self.to_sql_kwargs )
return written
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
        offset , index , to_sql_kwargs = args
SCREAMING_SNAKE_CASE__ : List[str] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
SCREAMING_SNAKE_CASE__ : Any = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ : Optional[int] = batch.to_pandas()
SCREAMING_SNAKE_CASE__ : List[Any] = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def _a ( self , _a , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
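# Hedged usage sketch (added by the editor, not executed here): the reader and writer above
# back the public `Dataset.from_sql` / `Dataset.to_sql` helpers of the `datasets` library.
# The table and database names are made up, and passing a raw sqlite3 connection is an
# assumption about the public API rather than something shown in this module.
#
# import sqlite3
# from datasets import Dataset
#
# con = sqlite3.connect("example.db")
# Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]}).to_sql("my_table", con)
# round_tripped = Dataset.from_sql("my_table", con)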
| 12 | 0 |
'''simple docstring'''
import math
def prime_sieve(n: int ) -> list:
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(limit: int = 999966663333 ) -> int:
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
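# Hedged usage sketch (added by the editor, not executed here): upstream this backbone is
# exposed as `TimmBackbone`. The timm checkpoint name and stage indices below are
# illustrative, and running it requires `timm` to be installed.
#
# import torch
#
# config = TimmBackboneConfig(backbone="resnet18", num_channels=3, features_only=True,
#                             use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
# backbone = TimmBackbone(config)
# outputs = backbone(torch.rand(1, 3, 224, 224))
# feature_maps = outputs.feature_maps  # one tensor per requested stage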
| 685 | 1 |
import math
def fx(x: float , a: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 2 ) - a
def fx_derivative(x: float ) -> float:
    '''simple docstring'''
    return 2 * x
def get_initial_point(a: float ) -> float:
    '''simple docstring'''
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative(a: float , max_iter: int = 9999 , tolerance: float = 0.00_000_000_000_001 ) -> float:
    '''simple docstring'''
    if a < 0:
        raise ValueError('math domain error' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
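# Hedged sanity check added by the editor: the Newton iteration above should agree with
# math.sqrt for ordinary inputs and reject negative ones.
if __name__ == "__main__":
    for _value in (2.0, 9.0, 144.0):
        assert abs(square_root_iterative(_value) - math.sqrt(_value)) < 1e-9
    try:
        square_root_iterative(-1.0)
    except ValueError:
        pass  # negative inputs raise "math domain error" as expected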
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 717 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file( filename : str , start_prompt : str , end_prompt : str ):
    '''simple docstring'''
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task( task_guide : str ):
    '''simple docstring'''
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide : str , overwrite : bool = False ):
    '''simple docstring'''
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                ' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 97 | 0
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowercase__ ):
_SCREAMING_SNAKE_CASE : int = (DPMSolverSinglestepScheduler,)
_SCREAMING_SNAKE_CASE : str = (('num_inference_steps', 25),)
def lowerCAmelCase ( self : Any , **snake_case_ : int ):
__snake_case = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**snake_case_ )
return config
def lowerCAmelCase ( self : int , snake_case_ : Dict=0 , **snake_case_ : Optional[int] ):
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop("num_inference_steps" , snake_case_ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config(**snake_case_ )
__snake_case = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
__snake_case = scheduler_class.from_pretrained(snake_case_ )
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case , __snake_case = sample, sample
for t in range(snake_case_ , time_step + scheduler.config.solver_order + 1 ):
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
__snake_case = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : str ):
pass
def lowerCAmelCase ( self : List[Any] , snake_case_ : str=0 , **snake_case_ : Any ):
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop("num_inference_steps" , snake_case_ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
__snake_case = scheduler_class.from_pretrained(snake_case_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residual (must be after setting timesteps)
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
__snake_case = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : str , snake_case_ : Union[str, Any]=None , **snake_case_ : Union[str, Any] ):
if scheduler is None:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**snake_case_ )
__snake_case = scheduler_class(**snake_case_ )
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**snake_case_ )
__snake_case = scheduler_class(**snake_case_ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(snake_case_ , snake_case_ )
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
return sample
def lowerCAmelCase ( self : Any ):
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = 50
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(snake_case_ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__snake_case = model(snake_case_ , snake_case_ )
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def lowerCAmelCase ( self : List[Any] ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def lowerCAmelCase ( self : List[Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = self.full_loop(scheduler=snake_case_ )
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
__snake_case = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case = self.full_loop(scheduler=snake_case_ )
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def lowerCAmelCase ( self : str ):
self.check_over_configs(thresholding=snake_case_ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , algorithm_type="dpmsolver++" , solver_order=snake_case_ , solver_type=snake_case_ , )
def lowerCAmelCase ( self : Tuple ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def lowerCAmelCase ( self : List[Any] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
__snake_case = self.full_loop(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
assert not torch.isnan(snake_case_ ).any(), "Samples have nan numbers"
def lowerCAmelCase ( self : int ):
self.check_over_configs(lower_order_final=snake_case_ )
self.check_over_configs(lower_order_final=snake_case_ )
def lowerCAmelCase ( self : int ):
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowerCAmelCase ( self : Any ):
self.check_over_configs(variance_type=snake_case_ )
self.check_over_configs(variance_type="learned_range" )
def lowerCAmelCase ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=snake_case_ , time_step=0 )
def lowerCAmelCase ( self : Dict ):
__snake_case = self.full_loop()
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def lowerCAmelCase ( self : str ):
__snake_case = self.full_loop(use_karras_sigmas=snake_case_ )
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def lowerCAmelCase ( self : Dict ):
__snake_case = self.full_loop(prediction_type="v_prediction" )
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def lowerCAmelCase ( self : Optional[Any] ):
__snake_case = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=snake_case_ )
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def lowerCAmelCase ( self : int ):
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(thresholding=snake_case_ , dynamic_thresholding_ratio=0 )
__snake_case = scheduler_class(**snake_case_ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(snake_case_ , snake_case_ )
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
        assert sample.dtype == torch.float16
| 163 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __magic_name__ ( lowercase__ ):
def __init__( self : int , *snake_case_ : Optional[Any] , **snake_case_ : Optional[Any] ):
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
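# Hedged migration example (added by the editor, not part of this shim): new code should
# instantiate the image processor directly. The checkpoint name below is illustrative.
#
# from transformers import MobileViTImageProcessor
#
# image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")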
| 163 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 488 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : str ):
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE : int = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : str = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler("sample_euler" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE : Optional[int] = output.images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE : Any = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 488 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class UpperCAmelCase ( snake_case_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features( self , features : Features ):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
def lowercase__ ( self : List[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
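# Hedged usage sketch (added by the editor, not executed here): upstream this template is
# exposed as `TextClassification`. The label names below are illustrative.
#
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification(text_column="text", label_column="labels")
# aligned = task.align_with_features(features)  # label schema now carries the two class names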
| 207 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str =[
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] =[
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] =[
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
A__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 207 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = torch.device("""cpu""")
def snake_case_ () -> int:
__lowerCAmelCase : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase : Optional[int] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
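# reference logit slices for each SwiftFormer variant, used later to sanity-check the converted weights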
def snake_case_ (__A : Any ) -> Any:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
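# pop the value stored under an old checkpoint key and re-insert it under its new name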
def snake_case_ (__A : Any , __A : Tuple , __A : Dict ) -> Union[str, Any]:
__lowerCAmelCase : Tuple = dct.pop(_UpperCamelCase )
__lowerCAmelCase : Tuple = val
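# build the list of (old_key, new_key) pairs mapping original SwiftFormer names to the HF layout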
def snake_case_ (__A : str ) -> int:
__lowerCAmelCase : Dict = []
for k in state_dict.keys():
__lowerCAmelCase : Tuple = k
if ".pwconv" in k:
__lowerCAmelCase : Optional[int] = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
__lowerCAmelCase : List[str] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
__lowerCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
__lowerCAmelCase : Optional[int] = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
__lowerCAmelCase : str = k_new.split(""".""" )
if ls[2].isdigit():
__lowerCAmelCase : int = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
__lowerCAmelCase : Optional[Any] = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
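# load an original SwiftFormer checkpoint, port the weights to the HF model, verify the logits and save the result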
@torch.no_grad()
def snake_case_ (__A : str , __A : Optional[Any] , __A : Tuple ) -> Dict:
__lowerCAmelCase : List[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__lowerCAmelCase : List[str] = 1_0_0_0
__lowerCAmelCase : str = """huggingface/label-files"""
__lowerCAmelCase : List[Any] = """imagenet-1k-id2label.json"""
__lowerCAmelCase : Tuple = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase : str = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase : Optional[int] = idalabel
__lowerCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__lowerCAmelCase : Optional[int] = [3, 3, 6, 4]
__lowerCAmelCase : List[str] = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
__lowerCAmelCase : int = [3, 3, 9, 6]
__lowerCAmelCase : Dict = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
__lowerCAmelCase : Any = [4, 3, 1_0, 5]
__lowerCAmelCase : Dict = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
__lowerCAmelCase : Optional[Any] = [4, 4, 1_2, 6]
__lowerCAmelCase : Any = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
__lowerCAmelCase : str = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="""cpu""" , check_hash=_UpperCamelCase )
else:
__lowerCAmelCase : Tuple = torch.load(_UpperCamelCase , map_location="""cpu""" )
__lowerCAmelCase : Tuple = checkpoint
__lowerCAmelCase : Optional[Any] = create_rename_keys(_UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
__lowerCAmelCase : List[str] = SwiftFormerForImageClassification(_UpperCamelCase ).eval()
hf_model.load_state_dict(_UpperCamelCase )
# prepare test inputs
__lowerCAmelCase : Optional[Any] = prepare_img()
__lowerCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
__lowerCAmelCase : Tuple = processor(images=_UpperCamelCase , return_tensors="""pt""" )
# compare outputs from both models
__lowerCAmelCase : List[Any] = get_expected_output(_UpperCamelCase )
__lowerCAmelCase : Optional[Any] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , _UpperCamelCase , atol=1e-3 )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
__UpperCAmelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 721 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
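# build default attention and head masks for the Flax Blenderbot tests when they are not supplied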
def snake_case_ (__A : Tuple , __A : List[str] , __A : str=None , __A : Any=None , __A : Union[str, Any]=None , __A : str=None , __A : str=None , __A : Tuple=None , ) -> Optional[int]:
if attention_mask is None:
__lowerCAmelCase : Optional[int] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__lowerCAmelCase : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__lowerCAmelCase : int = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCAmelCase : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCAmelCase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
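# model tester that builds tiny Blenderbot configs/inputs and exercises cached (incremental) decoding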
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : str=13 , lowerCAmelCase : Union[str, Any]=7 , lowerCAmelCase : int=True , lowerCAmelCase : int=False , lowerCAmelCase : Any=99 , lowerCAmelCase : Dict=16 , lowerCAmelCase : int=2 , lowerCAmelCase : int=4 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : Any=2 , lowerCAmelCase : Dict=1 , lowerCAmelCase : Dict=0 , lowerCAmelCase : List[str]=0.02 , ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = parent
__lowerCAmelCase : str = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : int = is_training
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Optional[int] = hidden_size
__lowerCAmelCase : Dict = num_hidden_layers
__lowerCAmelCase : Optional[int] = num_attention_heads
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Tuple = hidden_dropout_prob
__lowerCAmelCase : str = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = eos_token_id
__lowerCAmelCase : List[Any] = pad_token_id
__lowerCAmelCase : Optional[Any] = bos_token_id
__lowerCAmelCase : Dict = initializer_range
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__lowerCAmelCase : List[str] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__lowerCAmelCase : Optional[int] = shift_tokens_right(lowerCAmelCase , 1 , 2 )
__lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase , )
__lowerCAmelCase : Dict = prepare_blenderbot_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : Tuple = model_class_name(lowerCAmelCase )
__lowerCAmelCase : str = model.encode(inputs_dict["""input_ids"""] )
__lowerCAmelCase ,__lowerCAmelCase : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__lowerCAmelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : List[str] = model.decode(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[Any] = 20
__lowerCAmelCase : Tuple = model_class_name(lowerCAmelCase )
__lowerCAmelCase : Tuple = model.encode(inputs_dict["""input_ids"""] )
__lowerCAmelCase ,__lowerCAmelCase : str = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowerCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : Any = model.decode(lowerCAmelCase , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase )
__lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] =99
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Dict = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__lowerCAmelCase : Dict = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Tuple = self._get_config_and_data()
__lowerCAmelCase : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase )
__lowerCAmelCase : Any = lm_model(input_ids=lowerCAmelCase )
__lowerCAmelCase : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__lowerCAmelCase : List[str] = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase )
__lowerCAmelCase : Dict = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__lowerCAmelCase : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__lowerCAmelCase : List[str] = lm_model(input_ids=lowerCAmelCase , decoder_input_ids=lowerCAmelCase )
__lowerCAmelCase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__lowerCAmelCase : Tuple = shift_tokens_right(lowerCAmelCase , 1 , 2 )
__lowerCAmelCase : int = np.equal(lowerCAmelCase , 1 ).astype(np.floataa ).sum()
__lowerCAmelCase : List[Any] = np.equal(lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase , a_ ):
"""simple docstring"""
lowerCamelCase : Dict =True
lowerCamelCase : List[Any] =(
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCamelCase : Tuple =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = FlaxBlenderbotModelTester(self )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : Tuple = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : str = model_class(lowerCAmelCase )
@jax.jit
def encode_jitted(lowerCAmelCase : Optional[int] , lowerCAmelCase : Any=None , **lowerCAmelCase : Optional[Any] ):
return model.encode(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase : Optional[int] = encode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase : Tuple = encode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : List[Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : Any = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__lowerCAmelCase : Union[str, Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] ):
return model.decode(
decoder_input_ids=lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , encoder_outputs=lowerCAmelCase , )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase : Union[str, Any] = decode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase : Optional[Any] = decode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__lowerCAmelCase : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
__lowerCAmelCase : Any = model(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
__lowerCAmelCase : str = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
__lowerCAmelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase )
__lowerCAmelCase : str = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
__lowerCAmelCase : List[str] = ["""Sam"""]
__lowerCAmelCase : List[str] = tokenizer(lowerCAmelCase , return_tensors="""jax""" )
__lowerCAmelCase : Union[str, Any] = model.generate(**lowerCAmelCase , **lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = """Sam is a great name. It means \"sun\" in Gaelic."""
__lowerCAmelCase : List[Any] = tokenizer.batch_decode(lowerCAmelCase , **lowerCAmelCase )
assert generated_txt[0].strip() == tgt_text
| 218 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self , __a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
__lowerCAmelCase = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__a )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
__lowerCAmelCase = "sgugger/tiny-distilbert-classification"
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , torchscript=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , fpaa=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = AutoConfig.from_pretrained(__a )
# set architectures equal to `None`
__lowerCAmelCase = None
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__a , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = AutoConfig.from_pretrained(__a )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tinier_bart"
__lowerCAmelCase = AutoConfig.from_pretrained(__a )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
__lowerCAmelCase = AutoConfig.from_pretrained(__a )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tinier_bart"
__lowerCAmelCase = AutoConfig.from_pretrained(__a )
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a , configs=[config] )
__lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , "inf_time.csv" ) , train_memory_csv_file=os.path.join(__a , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(__a , "inf_mem.csv" ) , train_time_csv_file=os.path.join(__a , "train_time.csv" ) , env_info_csv_file=os.path.join(__a , "env.csv" ) , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__a , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__a , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__a , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__a , "env.csv" ) ).exists() )
def snake_case ( self ):
__lowerCAmelCase = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , "sequential" ) )
self.assertTrue(hasattr(__a , "cumulative" ) )
self.assertTrue(hasattr(__a , "current" ) )
self.assertTrue(hasattr(__a , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , "log.txt" ) , log_print=__a , trace_memory_line_by_line=__a , multi_process=__a , )
__lowerCAmelCase = PyTorchBenchmark(__a )
__lowerCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__a , "log.txt" ) ).exists() )
| 636 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
A : List[str] = pytest.mark.integration
A : Optional[Any] = {"comet"}
A : int = importlib.util.find_spec("fairseq") is not None
A : Union[str, Any] = {"code_eval"}
A : Dict = os.name == "nt"
A : Dict = {"bertscore", "frugalscore", "perplexity"}
A : Any = importlib.util.find_spec("transformers") is not None
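# the decorators below skip metric tests whose optional dependency or platform requirement is not met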
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self , _UpperCamelCase ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , _UpperCamelCase )
return wrapper
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self , _UpperCamelCase ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , _UpperCamelCase )
return wrapper
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self , _UpperCamelCase ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , _UpperCamelCase )
return wrapper
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
@local
class _UpperCamelCase ( parameterized.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any ={}
__UpperCAmelCase : List[str] =None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def snake_case ( self , __a ):
__lowerCAmelCase = "[...]"
__lowerCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __a ) ).module_path )
__lowerCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=__a )
# check parameters
__lowerCAmelCase = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__a , metric_module.__name__ ):
with self.use_local_metrics():
try:
__lowerCAmelCase = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def snake_case ( self , __a ):
__lowerCAmelCase = "[...]"
__lowerCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __a ) ).module_path )
# run doctest
with self.use_local_metrics():
__lowerCAmelCase = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def snake_case ( self , __a , __a ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ):
yield
else:
yield
@contextmanager
def snake_case ( self ):
def load_local_metric(__a , *__a , **__a ):
return load_metric(os.path.join("metrics" , __a ) , *__a , **__a )
with patch("datasets.load_metric" ) as mock_load_metric:
__lowerCAmelCase = load_local_metric
yield
@classmethod
def snake_case ( cls , __a ):
def wrapper(__a ):
__lowerCAmelCase = contextmanager(__a )
__lowerCAmelCase = patcher
return patcher
return wrapper
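# the patchers below replace the expensive model downloads and forward passes of individual metrics with lightweight mocks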
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def snake_case ( self , __a ):
assert len(input_dict["input_ids"] ) == 2
return np.array([1.0_3, 1.0_4] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
__lowerCAmelCase = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
import torch
def bert_cos_score_idf(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_UpperCamelCase ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
__lowerCAmelCase = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
def load_from_checkpoint(_UpperCamelCase ):
class _UpperCamelCase :
'''simple docstring'''
def snake_case ( self , __a , *__a , **__a ):
assert len(__a ) == 2
__lowerCAmelCase = [0.1_9, 0.9_2]
return scores, sum(__a ) / len(__a )
return Model()
    # mock download_model which is supposed to download a comet checkpoint
    # mock load_from_checkpoint which is supposed to load the downloaded comet model
with patch("comet.download_model" ) as mock_download_model:
__lowerCAmelCase = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
__lowerCAmelCase = load_from_checkpoint
yield
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = load_metric(os.path.join("metrics" , "seqeval" ) )
__lowerCAmelCase = "ERROR"
__lowerCAmelCase = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(_UpperCamelCase , match=re.escape(_UpperCamelCase ) ):
metric.compute(predictions=[] , references=[] , scheme=_UpperCamelCase )
| 636 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ = logging.get_logger(__name__)
A_ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class __lowercase ( __UpperCAmelCase ):
lowercase = """longformer"""
def __init__( self : List[Any] , __lowerCamelCase : List[str] = 5_12 , __lowerCamelCase : Any = 2 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[int] = 0 , __lowerCamelCase : List[str] = 2 , __lowerCamelCase : Dict = 3_05_22 , __lowerCamelCase : Optional[int] = 7_68 , __lowerCamelCase : Tuple = 12 , __lowerCamelCase : Dict = 12 , __lowerCamelCase : Dict = 30_72 , __lowerCamelCase : Any = "gelu" , __lowerCamelCase : Dict = 0.1 , __lowerCamelCase : Optional[Any] = 0.1 , __lowerCamelCase : Union[str, Any] = 5_12 , __lowerCamelCase : str = 2 , __lowerCamelCase : Optional[Any] = 0.02 , __lowerCamelCase : List[str] = 1E-12 , __lowerCamelCase : Any = False , **__lowerCamelCase : Optional[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowercase = attention_window
lowercase = sep_token_id
lowercase = bos_token_id
lowercase = eos_token_id
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = onnx_export
class __lowercase ( __UpperCAmelCase ):
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] = "default" , __lowerCamelCase : Union[str, Any] = None ) -> Any:
'''simple docstring'''
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowercase = True
@property
def __a ( self : Dict ) -> List[str]:
'''simple docstring'''
if self.task == "multiple-choice":
lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def __a ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowercase = super().outputs
if self.task == "default":
lowercase = {0: '''batch'''}
return outputs
@property
def __a ( self : str ) -> Optional[Any]:
'''simple docstring'''
return 1E-4
@property
def __a ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def __a ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : Any = False , __lowerCamelCase : Optional[Any] = None , ) -> Optional[int]:
'''simple docstring'''
lowercase = super().generate_dummy_inputs(
preprocessor=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowercase = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
lowercase = 1
return inputs
| 711 | import logging
from transformers import PretrainedConfig
A_ = logging.getLogger(__name__)
A_ = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class __lowercase ( _A ):
lowercase = 'bertabs'
def __init__( self : Dict , __lowerCamelCase : Tuple=3_05_22 , __lowerCamelCase : Tuple=5_12 , __lowerCamelCase : List[Any]=6 , __lowerCamelCase : Any=5_12 , __lowerCamelCase : Any=8 , __lowerCamelCase : Union[str, Any]=5_12 , __lowerCamelCase : Tuple=0.2 , __lowerCamelCase : str=6 , __lowerCamelCase : int=7_68 , __lowerCamelCase : int=8 , __lowerCamelCase : List[Any]=20_48 , __lowerCamelCase : Union[str, Any]=0.2 , **__lowerCamelCase : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowercase = vocab_size
lowercase = max_pos
lowercase = enc_layers
lowercase = enc_hidden_size
lowercase = enc_heads
lowercase = enc_ff_size
lowercase = enc_dropout
lowercase = dec_layers
lowercase = dec_hidden_size
lowercase = dec_heads
lowercase = dec_ff_size
lowercase = dec_dropout
| 479 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''microsoft/speecht5_tts'''
_lowerCamelCase = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
_lowerCamelCase = '''text_reader'''
_lowerCamelCase = SpeechTaProcessor
_lowerCamelCase = SpeechTaForTextToSpeech
_lowerCamelCase = SpeechTaHifiGan
_lowerCamelCase = ['''text''']
_lowerCamelCase = ['''audio''']
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
if self.post_processor is None:
snake_case_ : str = """microsoft/speecht5_hifigan"""
super().setup()
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.pre_processor(text=_lowercase , return_tensors="""pt""" , truncation=_lowercase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
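            # default to an x-vector from the CMU ARCTIC dataset as the speaker embedding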
snake_case_ : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
snake_case_ : Union[str, Any] = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase__ ( self , _lowercase ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(_lowercase ).cpu().detach()
| 58 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
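# supported modalities plus helpers that build dummy inputs and infer output types for the tool tests below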
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
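# map each tool output back to its modality name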
def lowercase ( __A : List ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
| 36 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 534 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
_SCREAMING_SNAKE_CASE = F'''https://www.google.com/search?q={query}&num=100'''
_SCREAMING_SNAKE_CASE = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
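    # try the primary result markup first, then fall back to the alternate layout if it is missing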
try:
_SCREAMING_SNAKE_CASE = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
_SCREAMING_SNAKE_CASE = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 534 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = tempfile.mkdtemp()
# fmt: off
__lowercase : List[str] = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowercase : Tuple = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowercase : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowercase : List[str] = {'''unk_token''': '''<unk>'''}
__lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
__lowercase : str = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowercase : List[Any] = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **UpperCamelCase_ )
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Tuple:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **UpperCamelCase_ )
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> str:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> int:
__lowercase : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase : Dict = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : int = self.get_rust_tokenizer()
__lowercase : Optional[int] = self.get_image_processor()
__lowercase : int = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
__lowercase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Any:
__lowercase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowercase : Any = self.get_image_processor(do_normalize=UpperCamelCase_ )
__lowercase : Dict = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : List[str] = self.get_image_processor()
__lowercase : Any = self.get_tokenizer()
__lowercase : List[str] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowercase : Dict = self.prepare_image_inputs()
__lowercase : List[Any] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
__lowercase : Optional[Any] = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self ) -> str:
__lowercase : Dict = self.get_image_processor()
__lowercase : Optional[int] = self.get_tokenizer()
__lowercase : List[str] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowercase : Optional[int] = '''lower newer'''
__lowercase : Any = processor(text=UpperCamelCase_ , return_tensors='''np''' )
__lowercase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : int = self.get_image_processor()
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowercase : str = '''lower newer'''
__lowercase : Tuple = self.prepare_image_inputs()
__lowercase : Optional[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : int = '''google/owlvit-base-patch32'''
__lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowercase : Tuple = ['''cat''', '''nasa badge''']
__lowercase : Tuple = processor(text=UpperCamelCase_ )
__lowercase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def _lowerCamelCase ( self ) -> Any:
__lowercase : Tuple = '''google/owlvit-base-patch32'''
__lowercase : int = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowercase : int = [['''cat''', '''nasa badge'''], ['''person''']]
__lowercase : Tuple = processor(text=UpperCamelCase_ )
__lowercase : Tuple = 16
__lowercase : Any = len(UpperCamelCase_ )
__lowercase : Optional[Any] = max([len(UpperCamelCase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : List[Any] = '''google/owlvit-base-patch32'''
__lowercase : Tuple = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowercase : str = ['''cat''', '''nasa badge''']
__lowercase : Optional[Any] = processor(text=UpperCamelCase_ )
__lowercase : Tuple = 16
__lowercase : int = inputs['''input_ids''']
__lowercase : List[Any] = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : Optional[int] = self.get_image_processor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : Any = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowercase : Union[str, Any] = self.prepare_image_inputs()
__lowercase : List[Any] = self.prepare_image_inputs()
__lowercase : Tuple = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowercase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : Any = processor.batch_decode(UpperCamelCase_ )
__lowercase : List[Any] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
| 76 |
'''Example script for pre-training a ViT-MAE (masked autoencoder) model on an image dataset.'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase__ : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class _a :
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'The column name of the images in the files.'})
SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the training data.'})
SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the validation data.'})
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = {}
if self.train_dir is not None:
_SCREAMING_SNAKE_CASE = self.train_dir
if self.validation_dir is not None:
_SCREAMING_SNAKE_CASE = self.validation_dir
_SCREAMING_SNAKE_CASE = data_files if data_files else None
@dataclass
class _a :
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'})
@dataclass
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'})
def collate_fn(examples):
    """Stack the per-example pixel values into a single batched tensor."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_SCREAMING_SNAKE_CASE = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE_ ) and data_args.train_val_split > 0.0:
_SCREAMING_SNAKE_CASE = ds["""train"""].train_test_split(data_args.train_val_split )
_SCREAMING_SNAKE_CASE = split["""train"""]
_SCREAMING_SNAKE_CASE = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
_SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE_ )
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE_ )
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
_SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
if training_args.do_train:
_SCREAMING_SNAKE_CASE = ds["""train"""].column_names
else:
_SCREAMING_SNAKE_CASE = ds["""validation"""].column_names
if data_args.image_column_name is not None:
_SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
_SCREAMING_SNAKE_CASE = """image"""
elif "img" in column_names:
_SCREAMING_SNAKE_CASE = """img"""
else:
_SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_SCREAMING_SNAKE_CASE = image_processor.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = (image_processor.size["""height"""], image_processor.size["""width"""])
_SCREAMING_SNAKE_CASE = Compose(
[
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms defined above."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
_SCREAMING_SNAKE_CASE = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE = last_checkpoint
_SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE_ )
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 591 | 0 |
"""Distributed Trainer test: checks that evaluation and prediction gather all samples, in order, across processes."""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__lowerCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : str , __snake_case : int = 1_0_1 ) -> Tuple:
__magic_name__: Optional[Any] = length
def __len__( self : int ) -> Tuple:
return self.length
def __getitem__( self : Dict , __snake_case : str ) -> int:
return i
class __A :
def __call__( self : int , __snake_case : List[str] ) -> List[Any]:
return {"input_ids": torch.tensor(__snake_case ), "labels": torch.tensor(__snake_case )}
class __A ( nn.Module ):
def __init__( self : Any ) -> Dict:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__magic_name__: List[str] = nn.Linear(1_2_0 , 8_0 )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : List[str]=None ) -> str:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __A ( SCREAMING_SNAKE_CASE_ ):
@require_torch_neuroncore
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__magic_name__: Tuple = self.get_auto_remove_tmp_dir()
__magic_name__: Any = F'--output_dir {output_dir}'.split()
__magic_name__: Any = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(__snake_case , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __A ( SCREAMING_SNAKE_CASE_ ):
@require_torch_multi_gpu
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__: List[Any] = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__magic_name__: Optional[Any] = self.get_auto_remove_tmp_dir()
__magic_name__: Union[str, Any] = F'--output_dir {output_dir}'.split()
__magic_name__: Union[str, Any] = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(__snake_case , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__lowerCamelCase = HfArgumentParser((TrainingArguments,))
__lowerCamelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
__lowerCamelCase = DummyDataset(dataset_length)
def a ( __UpperCAmelCase : EvalPrediction ) -> Dict:
__magic_name__: Any = list(range(len(__UpperCAmelCase ) ) )
__magic_name__: List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
__lowerCamelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__lowerCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCamelCase = 2
__lowerCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCamelCase = None
| 213 |
"""Fast tests for the DeepFloyd IF inpainting super-resolution pipeline."""
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 213 | 1 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits (Project Euler problem 25)."""
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        if len(str(f)) == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 99 |
"""Fast and slow tests for TextToVideoSDPipeline."""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ : Dict = TextToVideoSDPipeline
a_ : Dict = TEXT_TO_IMAGE_PARAMS
a_ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a_ : str = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
_lowerCamelCase : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_lowerCamelCase : str = CLIPTextModel(A )
_lowerCamelCase : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCamelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def _lowerCAmelCase ( self , A , A=0 ):
if str(A ).startswith('mps' ):
_lowerCamelCase : Tuple = torch.manual_seed(A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A )
_lowerCamelCase : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : Dict = TextToVideoSDPipeline(**A )
_lowerCamelCase : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(A )
_lowerCamelCase : Union[str, Any] = 'np'
_lowerCamelCase : Optional[int] = sd_pipe(**A ).frames
_lowerCamelCase : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_lowerCamelCase : Tuple = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCAmelCase ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class A_(unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
_lowerCamelCase : Dict = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_lowerCamelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCamelCase : Tuple = pipe.to('cuda' )
_lowerCamelCase : str = 'Spiderman is surfing'
_lowerCamelCase : Any = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase : Union[str, Any] = pipe(A , generator=A , num_inference_steps=25 , output_type='pt' ).frames
_lowerCamelCase : Any = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
_lowerCamelCase : int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_lowerCamelCase : Optional[Any] = pipe.to('cuda' )
_lowerCamelCase : Tuple = 'Spiderman is surfing'
_lowerCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase : Tuple = pipe(A , generator=A , num_inference_steps=2 , output_type='pt' ).frames
_lowerCamelCase : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 437 | 0 |
"""A simple launcher script for TPU training, spawning one process per TPU core."""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 714 |
'''Fast (Rust-backed) tokenization class for RoBERTa.'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
lowercase__ ={
'roberta-base': 5_12,
'roberta-large': 5_12,
'roberta-large-mnli': 5_12,
'distilroberta-base': 5_12,
'roberta-base-openai-detector': 5_12,
'roberta-large-openai-detector': 5_12,
}
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Tuple = ['input_ids', 'attention_mask']
lowerCamelCase__ : Any = RobertaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase ) != add_prefix_space:
a_ = getattr(UpperCAmelCase , pre_tok_state.pop("""type""" ) )
a_ = add_prefix_space
a_ = pre_tok_class(**UpperCAmelCase )
a_ = add_prefix_space
a_ = """post_processor"""
a_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
a_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a_ = tuple(state["""sep"""] )
if "cls" in state:
a_ = tuple(state["""cls"""] )
a_ = False
if state.get("""add_prefix_space""" , UpperCAmelCase ) != add_prefix_space:
a_ = add_prefix_space
a_ = True
if state.get("""trim_offsets""" , UpperCAmelCase ) != trim_offsets:
a_ = trim_offsets
a_ = True
if changes_to_apply:
a_ = getattr(UpperCAmelCase , state.pop("""type""" ) )
a_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def lowerCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
a_ = kwargs.get("""is_split_into_words""" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
a_ = kwargs.get("""is_split_into_words""" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
a_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=None ):
a_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 511 | 0 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : Union[str, Any] = "geglu" , UpperCAmelCase_ : int = None , UpperCAmelCase_ : Any = False , UpperCAmelCase_ : str = False , UpperCAmelCase_ : Union[str, Any] = False , UpperCAmelCase_ : Union[str, Any] = False , UpperCAmelCase_ : int = True , UpperCAmelCase_ : List[str] = "layer_norm" , UpperCAmelCase_ : Optional[int] = False , ):
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = only_cross_attention
SCREAMING_SNAKE_CASE : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
SCREAMING_SNAKE_CASE : List[str] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
SCREAMING_SNAKE_CASE : Union[str, Any] = AdaLayerNorm(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE : List[str] = AdaLayerNormZero(UpperCAmelCase_ , UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Attention(
query_dim=UpperCAmelCase_ , heads=UpperCAmelCase_ , dim_head=UpperCAmelCase_ , dropout=UpperCAmelCase_ , bias=UpperCAmelCase_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCAmelCase_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
SCREAMING_SNAKE_CASE : Dict = (
AdaLayerNorm(UpperCAmelCase_ , UpperCAmelCase_ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
)
SCREAMING_SNAKE_CASE : str = Attention(
query_dim=UpperCAmelCase_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCAmelCase_ , dim_head=UpperCAmelCase_ , dropout=UpperCAmelCase_ , bias=UpperCAmelCase_ , upcast_attention=UpperCAmelCase_ , ) # is self-attn if encoder_hidden_states is none
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[Any] = None
# 3. Feed-forward
SCREAMING_SNAKE_CASE : Dict = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = FeedForward(UpperCAmelCase_ , dropout=UpperCAmelCase_ , activation_fn=UpperCAmelCase_ , final_dropout=UpperCAmelCase_ )
# let chunk size default to None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : str = 0
def _A ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : Any = chunk_size
SCREAMING_SNAKE_CASE : Union[str, Any] = dim
def _A ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Dict = None , UpperCAmelCase_ : Tuple = None , UpperCAmelCase_ : Optional[Any] = None , UpperCAmelCase_ : Union[str, Any] = None , ):
if self.use_ada_layer_norm:
SCREAMING_SNAKE_CASE : List[str] = self.norma(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE : Any = self.norma(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hidden_dtype=hidden_states.dtype )
else:
SCREAMING_SNAKE_CASE : List[Any] = self.norma(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
SCREAMING_SNAKE_CASE : Any = self.attna(
UpperCAmelCase_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
if self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
SCREAMING_SNAKE_CASE : List[Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
SCREAMING_SNAKE_CASE : List[Any] = (
self.norma(UpperCAmelCase_ , UpperCAmelCase_ ) if self.use_ada_layer_norm else self.norma(UpperCAmelCase_ )
)
SCREAMING_SNAKE_CASE : Any = self.attna(
UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = attn_output + hidden_states
# 3. Feed-forward
SCREAMING_SNAKE_CASE : List[str] = self.norma(UpperCAmelCase_ )
if self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE : str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(
[self.ff(UpperCAmelCase_ ) for hid_slice in norm_hidden_states.chunk(UpperCAmelCase_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.ff(UpperCAmelCase_ )
if self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE : List[str] = gate_mlp.unsqueeze(1 ) * ff_output
SCREAMING_SNAKE_CASE : Tuple = ff_output + hidden_states
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict = None , UpperCAmelCase_ : Tuple = 4 , UpperCAmelCase_ : Any = 0.0 , UpperCAmelCase_ : Dict = "geglu" , UpperCAmelCase_ : str = False , ):
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = int(dim * mult )
SCREAMING_SNAKE_CASE : Optional[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
SCREAMING_SNAKE_CASE : Union[str, Any] = GELU(UpperCAmelCase_ , UpperCAmelCase_ )
if activation_fn == "gelu-approximate":
SCREAMING_SNAKE_CASE : Optional[Any] = GELU(UpperCAmelCase_ , UpperCAmelCase_ , approximate="tanh" )
elif activation_fn == "geglu":
SCREAMING_SNAKE_CASE : str = GEGLU(UpperCAmelCase_ , UpperCAmelCase_ )
elif activation_fn == "geglu-approximate":
SCREAMING_SNAKE_CASE : Union[str, Any] = ApproximateGELU(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList([] )
# project in
self.net.append(UpperCAmelCase_ )
# project dropout
self.net.append(nn.Dropout(UpperCAmelCase_ ) )
# project out
self.net.append(nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCAmelCase_ ) )
def _A ( self : List[Any] , UpperCAmelCase_ : int ):
for module in self.net:
SCREAMING_SNAKE_CASE : Union[str, Any] = module(UpperCAmelCase_ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str = "none" ):
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = approximate
def _A ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ):
if gate.device.type != "mps":
return F.gelu(UpperCAmelCase_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _A ( self : List[Any] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Optional[int] = self.proj(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.gelu(UpperCAmelCase_ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple ):
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(UpperCAmelCase_ , dim_out * 2 )
def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] ):
if gate.device.type != "mps":
return F.gelu(UpperCAmelCase_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _A ( self : Any , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Tuple = self.proj(UpperCAmelCase_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCAmelCase_ )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ):
super().__init__()
SCREAMING_SNAKE_CASE : str = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.proj(UpperCAmelCase_ )
return x * torch.sigmoid(1.702 * x )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE : int = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(UpperCAmelCase_ , embedding_dim * 2 )
SCREAMING_SNAKE_CASE : List[Any] = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
def _A ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = self.linear(self.silu(self.emb(UpperCAmelCase_ ) ) )
SCREAMING_SNAKE_CASE : int = torch.chunk(UpperCAmelCase_ , 2 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.norm(UpperCAmelCase_ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict ):
super().__init__()
SCREAMING_SNAKE_CASE : List[Any] = CombinedTimestepLabelEmbeddings(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = nn.SiLU()
SCREAMING_SNAKE_CASE : Any = nn.Linear(UpperCAmelCase_ , 6 * embedding_dim , bias=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ , eps=1E-6 )
def _A ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None ):
SCREAMING_SNAKE_CASE : Optional[int] = self.linear(self.silu(self.emb(UpperCAmelCase_ , UpperCAmelCase_ , hidden_dtype=UpperCAmelCase_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = emb.chunk(6 , dim=1 )
SCREAMING_SNAKE_CASE : Dict = self.norm(UpperCAmelCase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] = None , UpperCAmelCase_ : Dict = 1E-5 ):
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = num_groups
SCREAMING_SNAKE_CASE : Any = eps
if act_fn is None:
SCREAMING_SNAKE_CASE : Optional[Any] = None
else:
SCREAMING_SNAKE_CASE : Any = get_activation(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = nn.Linear(UpperCAmelCase_ , out_dim * 2 )
def _A ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any ):
if self.act:
SCREAMING_SNAKE_CASE : Optional[int] = self.act(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.linear(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = emb[:, :, None, None]
SCREAMING_SNAKE_CASE : Optional[Any] = emb.chunk(2 , dim=1 )
SCREAMING_SNAKE_CASE : Any = F.group_norm(UpperCAmelCase_ , self.num_groups , eps=self.eps )
SCREAMING_SNAKE_CASE : Optional[Any] = x * (1 + scale) + shift
return x
| 62 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
lowercase_ : int = field(
default=10_24, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the training data."""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the validation data."""} )
lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """A csv or a json file containing the test data."""} )
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_lowercase : int = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowercase : Tuple = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _lowerCamelCase:
lowercase_ : str = field(
default=_a, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
lowercase_ : str = field(
default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
}, )
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowercase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
    elif training_args.fp16:
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
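# ---------------------------------------------------------------------------
# Hedged illustration of the table preprocessing performed above (added for
# clarity, not part of the original script). `preprocess_tabfact_function`
# turns the TabFact "table_text" field (rows separated by newlines, cells
# separated by '#') into a pandas DataFrame before handing it to the TAPEX
# tokenizer. The table content below is a made-up example.
def convert_table_text_to_pandas_example(table_text: str) -> pd.DataFrame:
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    # the first row is treated as the header, the remaining rows as records
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])


# convert_table_text_to_pandas_example("city#population\nparis#2100000\nberlin#3600000")
# ->      city population
#    0   paris    2100000
#    1  berlin    3600000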
| 89 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]:
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("""csv""" , data_files=snake_case__)
_A = list(ds[list(files.keys())[0]].features.keys())
_A = features_name.pop(snake_case__)
_A = list(set(ds[list(files.keys())[0]][label_name]))
_A = {label: i for i, label in enumerate(snake_case__)}
_A = tokenizer.model_input_names
_A = {}
if len(snake_case__) == 1:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , )
elif len(snake_case__) == 2:
for k in files.keys():
_A = ds[k].map(
lambda snake_case__: tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
            snake_case__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
_A = (
tf.data.Dataset.from_generator(
            snake_case__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
_A = (
tf.data.Dataset.from_generator(
            snake_case__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} )
lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} )
lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} )
lowerCamelCase :int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase :bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase :Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def snake_case ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        F'''16-bits training: {training_args.fp16}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
    def compute_metrics(p :EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1)
        return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , """eval_results.txt""")
with open(snake_case__ , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
results.update(snake_case__)
return results
if __name__ == "__main__":
main()
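# ---------------------------------------------------------------------------
# Hedged sketch of the tf.data generator pattern used in get_tfds above (added
# for illustration, not part of the original script). get_tfds wraps tokenized
# examples in tf.data.Dataset.from_generator, yielding (features_dict, label)
# pairs. The feature names and values below are assumptions for illustration.
def _generator_pattern_demo():
    import tensorflow as tf

    def gen():
        for label in (0, 1, 0):
            features = {"input_ids": [101, 7, 8, 102], "attention_mask": [1, 1, 1, 1]}
            yield features, label

    dataset = tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
    )
    for features, label in dataset.take(1):
        return features["input_ids"].numpy(), int(label)  # -> (array([101, 7, 8, 102]), 0)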
| 706 | def snake_case ( snake_case__ :int , snake_case__ :int) -> int:
return int(input_a == input_a == 0)
def snake_case ( ) -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
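# ---------------------------------------------------------------------------
# Hedged extension (illustration only, not in the original module): NOR is
# functionally complete, so NOT, OR and AND can all be written in terms of the
# nor_gate defined above.
def not_gate(input_a: int) -> int:
    return nor_gate(input_a, input_a)


def or_gate(input_a: int, input_a_: int) -> int:
    return nor_gate(nor_gate(input_a, input_a_), nor_gate(input_a, input_a_))


def and_gate(input_a: int, input_a_: int) -> int:
    return nor_gate(nor_gate(input_a, input_a), nor_gate(input_a_, input_a_))


# quick self-check of the derived gates
assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]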
| 83 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main( ) -> None:
    """simple docstring"""
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
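# ---------------------------------------------------------------------------
# Hedged dry-run variant (added for illustration, not part of the original
# workflow). The loop above mutates issues directly; for local testing it can
# help to list which issues *would* be marked stale without touching them.
# It reuses the same PyGithub calls; the repository name is the one used above.
def dry_run() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    for issue in repo.get_issues(state="open"):
        age_days = (dt.utcnow() - issue.updated_at).days
        labels = [label.name.lower() for label in issue.get_labels()]
        if age_days > 23 and not any(label in LABELS_TO_EXEMPT for label in labels):
            print(f"would mark stale: #{issue.number} ({age_days} days inactive)")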
| 14 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
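# ---------------------------------------------------------------------------
# Hedged usage sketch of the replacement import recommended by the warning
# above (illustration only). The checkpoint id, file names and device are
# assumptions; running this downloads model weights and needs a GPU.
if __name__ == "__main__":
    import torch
    from PIL import Image

    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
    ).to("cuda")
    init_image = Image.open("input.png").convert("RGB")       # hypothetical file
    mask_image = Image.open("input_mask.png").convert("RGB")  # hypothetical file
    result = pipe(prompt="a white cat sitting on a bench", image=init_image, mask_image=mask_image).images[0]
    result.save("inpainted.png")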
| 15 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase : str = logging.get_logger(__name__)
def squared_euclidean_distance (a , b ) -> Any:
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize (x , clusters ) -> Tuple:
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = ["pixel_values"]
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
lowercase_ = get_size_dict(lowerCAmelCase_)
lowercase_ = np.array(lowerCAmelCase_) if clusters is not None else None
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = do_normalize
lowercase_ = do_color_quantize
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
lowercase_ = get_size_dict(lowerCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
return resize(
lowerCAmelCase_ , size=(size["""height"""], size["""width"""]) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , ):
"""simple docstring"""
lowercase_ = rescale(image=lowerCAmelCase_ , scale=1 / 127.5 , data_format=lowerCAmelCase_)
lowercase_ = image - 1
return image
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCAmelCase_ : Any , ):
"""simple docstring"""
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(lowerCAmelCase_)
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
lowercase_ = clusters if clusters is not None else self.clusters
lowercase_ = np.array(lowerCAmelCase_)
lowercase_ = make_list_of_images(lowerCAmelCase_)
if not valid_images(lowerCAmelCase_):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""")
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(lowerCAmelCase_) for image in images]
if do_resize:
lowercase_ = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=lowerCAmelCase_) for image in images]
if do_color_quantize:
lowercase_ = [to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
lowercase_ = np.array(lowerCAmelCase_)
lowercase_ = color_quantize(lowerCAmelCase_ , lowerCAmelCase_).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
lowercase_ = images.shape[0]
lowercase_ = images.reshape(lowerCAmelCase_ , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
lowercase_ = list(lowerCAmelCase_)
else:
lowercase_ = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_) for image in images]
lowercase_ = {"""input_ids""": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_)
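# ---------------------------------------------------------------------------
# Hedged illustration of the colour-quantization step used by the processor
# above (added for clarity): every pixel is assigned to its nearest cluster
# centre by squared Euclidean distance. The tiny arrays are made-up values.
def _color_quantize_demo():
    pixels = np.array([[0.9, 0.9, 0.9], [-1.0, -1.0, -1.0]])  # 2 pixels, RGB scaled to [-1, 1]
    clusters = np.array([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [0.0, 0.0, 0.0]])  # 3 centres
    distances = squared_euclidean_distance(pixels, clusters)  # shape (2, 3)
    return np.argmin(distances, axis=1)  # -> array([0, 1]): nearest centre per pixel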
| 703 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : Any = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : Optional[int] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : List[str] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
UpperCAmelCase : List[Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
UpperCAmelCase : Tuple = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
UpperCAmelCase : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase : Union[str, Any] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase : List[str] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCAmelCase : Optional[int] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase : Any = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
def __call__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Union[bool, str] = False , lowerCAmelCase_ : Union[bool, str] = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Optional[bool] = None , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
elif titles is None or texts is None:
lowercase_ = titles if texts is None else texts
return super().__call__(
lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = titles if not isinstance(lowerCAmelCase_ , lowerCAmelCase_) else [titles]
lowercase_ = texts if not isinstance(lowerCAmelCase_ , lowerCAmelCase_) else [texts]
lowercase_ = len(lowerCAmelCase_)
lowercase_ = questions if not isinstance(lowerCAmelCase_ , lowerCAmelCase_) else [questions] * n_passages
if len(lowerCAmelCase_) != len(lowerCAmelCase_):
raise ValueError(
F'''There should be as many titles than texts but got {len(lowerCAmelCase_)} titles and {len(lowerCAmelCase_)} texts.''')
lowercase_ = super().__call__(lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_)["""input_ids"""]
lowercase_ = super().__call__(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_)["""input_ids"""]
lowercase_ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase_ , lowerCAmelCase_)
]
}
if return_attention_mask is not False:
lowercase_ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
lowercase_ = attention_mask
return self.pad(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : BatchEncoding , lowerCAmelCase_ : DPRReaderOutput , lowerCAmelCase_ : int = 1_6 , lowerCAmelCase_ : int = 6_4 , lowerCAmelCase_ : int = 4 , ):
"""simple docstring"""
lowercase_ = reader_input["""input_ids"""]
lowercase_ , lowercase_ , lowercase_ = reader_output[:3]
lowercase_ = len(lowerCAmelCase_)
lowercase_ = sorted(range(lowerCAmelCase_) , reverse=lowerCAmelCase_ , key=relevance_logits.__getitem__)
lowercase_ = []
for doc_id in sorted_docs:
lowercase_ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
lowercase_ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase_ = sequence_ids.index(self.pad_token_id)
else:
lowercase_ = len(lowerCAmelCase_)
lowercase_ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase_ , top_spans=lowerCAmelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase_ , start_index=lowerCAmelCase_ , end_index=lowerCAmelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(lowerCAmelCase_) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , ):
"""simple docstring"""
lowercase_ = []
for start_index, start_score in enumerate(lowerCAmelCase_):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
lowercase_ = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[1] , reverse=lowerCAmelCase_)
lowercase_ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
lowercase_ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCAmelCase_) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = ["input_ids", "attention_mask"]
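# ---------------------------------------------------------------------------
# Hedged sketch of the span-selection idea implemented in _get_best_spans above
# (added for illustration): every (start, end) pair within max_answer_length is
# scored with start_logit + end_logit and the best-scoring span wins. The
# logits below are made-up numbers.
def _best_span_demo(start_logits, end_logits, max_answer_length=3):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + length), start_score + end_score))
    return max(scores, key=lambda item: item[1])[0]


# _best_span_demo([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]) -> (1, 2)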
| 100 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''EncodecFeatureExtractor'''
a__ =('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , A , A ) -> Any:
super().__init__(A , A )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __lowerCAmelCase ( self , A=None , A=None , A=True ) -> Tuple:
return self.tokenizer.get_decoder_prompt_ids(task=A , language=A , no_timestamps=A )
def __call__( self , *A , **A ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A , **A )
_UpperCAmelCase : List[Any] = kwargs.pop('''audio''' , A )
_UpperCAmelCase : str = kwargs.pop('''sampling_rate''' , A )
_UpperCAmelCase : Optional[int] = kwargs.pop('''text''' , A )
if len(A ) > 0:
_UpperCAmelCase : Union[str, Any] = args[0]
_UpperCAmelCase : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
_UpperCAmelCase : Union[str, Any] = self.tokenizer(A , **A )
if audio is not None:
_UpperCAmelCase : Tuple = self.feature_extractor(A , *A , sampling_rate=A , **A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_UpperCAmelCase : Any = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
_UpperCAmelCase : int = audio_inputs['''padding_mask''']
return inputs
def __lowerCAmelCase ( self , *A , **A ) -> Any:
_UpperCAmelCase : List[Any] = kwargs.pop('''audio''' , A )
_UpperCAmelCase : Tuple = kwargs.pop('''padding_mask''' , A )
if len(A ) > 0:
_UpperCAmelCase : Optional[int] = args[0]
_UpperCAmelCase : Any = args[1:]
if audio_values is not None:
return self._decode_audio(A , padding_mask=A )
else:
return self.tokenizer.batch_decode(*A , **A )
def __lowerCAmelCase ( self , *A , **A ) -> Optional[int]:
return self.tokenizer.decode(*A , **A )
def __lowerCAmelCase ( self , A , A = None ) -> List[np.ndarray]:
_UpperCAmelCase : List[str] = to_numpy(A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = audio_values.shape
if padding_mask is None:
return list(A )
_UpperCAmelCase : Union[str, Any] = to_numpy(A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_UpperCAmelCase : Union[str, Any] = seq_len - padding_mask.shape[-1]
_UpperCAmelCase : str = 1 - self.feature_extractor.padding_value
_UpperCAmelCase : Optional[int] = np.pad(A , ((0, 0), (0, difference)) , '''constant''' , constant_values=A )
_UpperCAmelCase : List[str] = audio_values.tolist()
for i in range(A ):
_UpperCAmelCase : List[str] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_UpperCAmelCase : List[Any] = sliced_audio.reshape(A , -1 )
return audio_values
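# ---------------------------------------------------------------------------
# Hedged illustration of the padding-mask trick used in _decode_audio above
# (added for clarity): the mask is padded with the *non*-padding value so that
# freshly generated samples are kept, then each waveform is sliced back to its
# true length. Values are made up; padding_value is assumed to be 0.
def _trim_demo():
    audio_values = np.array([[1.0, 2.0, 3.0, 4.0]])  # (batch=1, seq_len=4), last sample newly generated
    padding_mask = np.array([[1, 1, 0]])             # input mask: 2 real samples, 1 padded
    difference = audio_values.shape[-1] - padding_mask.shape[-1]
    padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1)
    return audio_values[0][padding_mask[0] != 0]     # -> array([1., 2., 4.])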
| 506 |
"""simple docstring"""
import qiskit
def single_qubit_measure (UpperCamelCase__ : int , UpperCamelCase__ : int ):
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
_lowerCAmelCase :Tuple = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
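# ---------------------------------------------------------------------------
# Hedged companion example (not in the original file): the same simulate-and-
# measure flow, but with Hadamard gates so the shots spread over all four
# basis states instead of collapsing to '11'.
def superposition_measure(shots: int = 1000):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)
    circuit.h(1)
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, simulator, shots=shots)
    return job.result().get_counts(circuit)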
| 506 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_a : List[str]= logging.get_logger(__name__)
_a : Tuple= {"vocab_file": "vocab.txt"}
_a : str= {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_a : int= {
"facebook/esm2_t6_8M_UR50D": 1_024,
"facebook/esm2_t12_35M_UR50D": 1_024,
}
def load_vocab_file ( vocab_file ) -> Optional[int]:
    '''simple docstring'''
    with open(vocab_file , 'r' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Any = VOCAB_FILES_NAMES
UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : int = ["""input_ids""", """attention_mask"""]
def __init__(self : Optional[Any] , _A : Optional[Any] , _A : List[str]="<unk>" , _A : Union[str, Any]="<cls>" , _A : str="<pad>" , _A : List[str]="<mask>" , _A : Optional[int]="<eos>" , **_A : List[str] , ) -> Any:
super().__init__(**_A)
__snake_case : Union[str, Any] = load_vocab_file(_A)
__snake_case : Tuple = dict(enumerate(self.all_tokens))
__snake_case : Dict = {tok: ind for ind, tok in enumerate(self.all_tokens)}
__snake_case : Tuple = unk_token
__snake_case : str = cls_token
__snake_case : Union[str, Any] = pad_token
__snake_case : str = mask_token
__snake_case : Union[str, Any] = eos_token
__snake_case : Union[str, Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def _lowercase (self : Optional[Any] , _A : int) -> str:
return self._id_to_token.get(_A , self.unk_token)
def _lowercase (self : List[str] , _A : str) -> int:
return self._token_to_id.get(_A , self._token_to_id.get(self.unk_token))
def _lowercase (self : Tuple , _A : Tuple , **_A : Union[str, Any]) -> Optional[int]:
return text.split()
def _lowercase (self : str , _A : str=False) -> Any:
return len(self._id_to_token)
def _lowercase (self : Dict) -> Optional[Any]:
return {token: i for i, token in enumerate(self.all_tokens)}
def _lowercase (self : str , _A : str) -> int:
return self._token_to_id.get(_A , self._token_to_id.get(self.unk_token))
def _lowercase (self : str , _A : int) -> str:
return self._id_to_token.get(_A , self.unk_token)
def _lowercase (self : Tuple , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : int = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _lowercase (self : Any , _A : List , _A : Optional[List] = None , _A : bool = False) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
__snake_case : Any = [1] + ([0] * len(_A)) + [1]
if token_ids_a is not None:
mask += [0] * len(_A) + [1]
return mask
def _lowercase (self : List[str] , _A : List[Any] , _A : Optional[Any]) -> List[str]:
__snake_case : str = os.path.join(_A , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(_A , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def _lowercase (self : str) -> int:
return self.get_vocab_size(with_added_tokens=_A)
def _lowercase (self : Optional[Any] , _A : Union[List[str], List[AddedToken]] , _A : bool = False) -> int:
return super()._add_tokens(_A , special_tokens=_A)
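# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): ESM-style vocabularies are
# plain text files with one token per line, read by load_vocab_file above.
# The miniature vocabulary below is a made-up subset of the real ESM alphabet.
def _vocab_demo():
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
        vocab_path = tmp.name
    tokens = load_vocab_file(vocab_path)
    token_to_id = {tok: i for i, tok in enumerate(tokens)}
    # a protein sequence is tokenized character by character, e.g. "LAG" -> [4, 5, 6]
    return [token_to_id.get(ch, token_to_id["<unk>"]) for ch in "LAG"]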
| 192 | """simple docstring"""
def find_minimum_change ( denominations : list[int] , value : str ) -> list[int]:
    '''simple docstring'''
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination ) # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
_a : Optional[int]= []
_a : Optional[int]= "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
_a : int= int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
_a : Optional[int]= input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
_a : Tuple= [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
_a : List[str]= input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f'''Following is minimal change for {value}: ''')
_a : List[Any]= find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
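# ---------------------------------------------------------------------------
# Hedged worked example (added for illustration, not part of the original
# script): greedy change for 987 with the Indian denominations listed above.
# Working: 987 - 500 = 487, minus four 100s leaves 87, minus 50 leaves 37,
# minus 20 leaves 17, minus 10 leaves 7, minus 5 leaves 2, minus 2 leaves 0.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2_000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]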
| 192 | 1 |
"""simple docstring"""
class A__ :
'''simple docstring'''
    def __init__( self: Dict , name , value , weight) -> List[str]:
        """simple docstring"""
        self.name = name
        self.value = value
        self.weight = weight
def __repr__( self: Optional[int]) -> Tuple:
"""simple docstring"""
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
    def get_value( self: List[str]) -> int:
        """simple docstring"""
        return self.value
    def get_name( self: Tuple) -> List[Any]:
        """simple docstring"""
        return self.name
    def get_weight( self: List[Any]) -> Dict:
        """simple docstring"""
        return self.weight
    def value_weight( self: Any) -> Optional[int]:
        """simple docstring"""
        return self.value / self.weight
def build_menu ( name ,value ,weight ) -> Any:
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] ,value[i] ,weight[i] ) )
    return menu
def greedy ( items ,max_cost ,key_func ) -> List[str]:
    items_copy = sorted(items ,key=key_func ,reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def _lowercase ( ) -> Union[str, Any]:
pass
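# ---------------------------------------------------------------------------
# Hedged usage example (added for illustration): a tiny menu and a weight
# budget. The item names, values and weights are made-up numbers.
def _greedy_demo():
    names = ["burger", "salad", "ice cream"]
    values = [80.0, 30.0, 50.0]
    weights = [40.0, 10.0, 25.0]
    menu = build_menu(names, values, weights)
    # pick items by value density until the 50-unit weight budget is exhausted
    chosen, total_value = greedy(menu, 50.0, lambda item: item.get_value() / item.get_weight())
    return [item.get_name() for item in chosen], total_value


# _greedy_demo() -> (['salad', 'burger'], 110.0)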
if __name__ == "__main__":
import doctest
doctest.testmod() | 293 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32 ).newbyteorder(">" )
    return numpy.frombuffer(bytestream.read(4 ) ,dtype=dt )[0]
@deprecated(None ,"Please use tf.data to implement this functionality." )
def _extract_images(f):
    print("Extracting" ,f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf ,dtype=numpy.uint8 )
        data = data.reshape(num_images ,rows ,cols ,1 )
        return data
@deprecated(None ,"Please use tf.one_hot on tensors." )
def _dense_to_one_hot(labels_dense ,num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None ,"Please use tf.data to implement this functionality." )
def _extract_labels(f ,one_hot=False ,num_classes=10):
    print("Extracting" ,f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf ,dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels ,num_classes )
        return labels
class _DataSet :
'''simple docstring'''
    @deprecated(
        None , "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models." , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ) -> None:
        """simple docstring"""
        seeda , seedb = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 1_0000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images , 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images( self ):
        """simple docstring"""
        return self._images

    @property
    def labels( self ):
        """simple docstring"""
        return self._labels

    @property
    def num_examples( self ):
        """simple docstring"""
        return self._num_examples

    @property
    def epochs_completed( self ):
        """simple docstring"""
        return self._epochs_completed
    def next_batch( self , batch_size , fake_data=False , shuffle=True ) -> int:
        """simple docstring"""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None ,"Please write your own downloading logic." )
def _maybe_download(filename ,work_directory ,source_url):
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory ,filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url ,filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print("Successfully downloaded" ,filename ,size ,"bytes." )
    return filepath
@deprecated(
    None ,"Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def read_data_sets(train_dir ,fake_data=False ,one_hot=False ,dtype=dtypes.float32 ,reshape=True ,validation_size=5_000 ,seed=None ,source_url=DEFAULT_SOURCE_URL ,):
    if fake_data:

        def fake():
            return _DataSet(
                [] ,[] ,fake_data=True ,one_hot=one_hot ,dtype=dtype ,seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train ,validation=validation ,test=test )

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file ,train_dir ,source_url + train_images_file )
    with gfile.Open(local_file ,"rb" ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file ,train_dir ,source_url + train_labels_file )
    with gfile.Open(local_file ,"rb" ) as f:
        train_labels = _extract_labels(f ,one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file ,train_dir ,source_url + test_images_file )
    with gfile.Open(local_file ,"rb" ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file ,train_dir ,source_url + test_labels_file )
    with gfile.Open(local_file ,"rb" ) as f:
        test_labels = _extract_labels(f ,one_hot=one_hot )

    if not 0 <= validation_size <= len(train_images ):
        msg = (
            "Validation size should be between 0 and "
            F"""{len(train_images )}. Received: {validation_size}."""
        )
        raise ValueError(msg )

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images ,train_labels ,**options )
    validation = _DataSet(validation_images ,validation_labels ,**options )
    test = _DataSet(test_images ,test_labels ,**options )

    return _Datasets(train=train ,validation=validation ,test=test )
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
    parser.add_argument(
        '--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
    parser.add_argument(
        '--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
    args = parser.parse_args()

    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts ) + '\n' )
if __name__ == "__main__":
main()
| 434 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
a__ : int ={
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class snake_case ( PretrainedConfig ):
"""simple docstring"""
    model_type ="xlnet"
    keys_to_ignore_at_inference =["mems"]
    attribute_map ={
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=3_2_0_0_0 , d_model=1_0_2_4 , n_layer=2_4 , n_head=1_6 , d_inner=4_0_9_6 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1e-12 , dropout=0.1 , mem_len=5_1_2 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 434 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
__snake_case : List[str] = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__snake_case : Optional[int] = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
    std_idx = 0
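    # a subset of the teacher layers (0, 2, 4, 7, 9, 11) is copied onto consecutive student layer indices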
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__snake_case : str = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__snake_case : Tuple = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__snake_case : str = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__snake_case : Any = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__snake_case : Union[str, Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__snake_case : Union[str, Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__snake_case : Any = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__snake_case : Optional[int] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__snake_case : Union[str, Any] = state_dict['cls.predictions.decoder.weight']
__snake_case : Union[str, Any] = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
__snake_case : Union[str, Any] = state_dict[F"""cls.predictions.transform.dense.{w}"""]
__snake_case : List[str] = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint) | 293 | 1 |
"""simple docstring"""
def _modexpt( base , exponent , modulo_value ):
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value


def solution( base = 17_77 , height = 18_55 , digits = 8 ):
    '''simple docstring'''
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F'{solution() = }')
| 616 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6_0_0_0 , do_normalize=True , num_mel_bins=8_0 , hop_length=1_6 , win_length=6_4 , win_function="hann_window" , fmin=8_0 , fmax=7_6_0_0 , mel_floor=1e-10 , return_attention_mask=True , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ) -> Any:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Optional[Any]:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ) -> str:
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class lowerCamelCase (SequenceFeatureExtractionTestMixin ,unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp( self ) -> Any:
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )

    def _check_zero_mean_unit_variance( self , input_vector ) -> Tuple:
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__ = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ = feat_extract(__UpperCAmelCase , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feat_extract(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = ["""longest""", """max_length""", """do_not_pad"""]
SCREAMING_SNAKE_CASE__ = [None, 1_6_0_0, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = feat_extract(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = range(8_0_0 , 1_4_0_0 , 2_0_0 )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ = ["""longest""", """max_length""", """do_not_pad"""]
SCREAMING_SNAKE_CASE__ = [None, 1_6_0_0, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = feat_extract(__UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=1_0_0_0 , padding="""max_length""" , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=1_0_0_0 , padding="""longest""" , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=2_0_0_0 , padding="""longest""" , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = np.random.rand(1_0_0 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__ = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__ = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ = feature_extractor(audio_target=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
SCREAMING_SNAKE_CASE__ = np.asarray(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) for x, y in zip(__UpperCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
SCREAMING_SNAKE_CASE__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
SCREAMING_SNAKE_CASE__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ = feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = [len(__UpperCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ = feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = [len(__UpperCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = min(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
__UpperCAmelCase , padding="""max_length""" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : int ) -> str:
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__ = ds.sort("""id""" ).select(range(__UpperCAmelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# fmt: off
SCREAMING_SNAKE_CASE__ = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
SCREAMING_SNAKE_CASE__ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , __UpperCAmelCase , atol=1e-6 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
# fmt: off
SCREAMING_SNAKE_CASE__ = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
SCREAMING_SNAKE_CASE__ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = feature_extractor(audio_target=__UpperCAmelCase , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , __UpperCAmelCase , atol=1e-4 ) )
| 616 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta_xl'''] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 674 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class ImageGPTFeatureExtractor( ImageGPTImageProcessor ):
def __init__(self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase ) | 156 | 0 |
def matching_min_vertex_cover( graph: dict ):
    '''simple docstring'''
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices


def get_edges( graph: dict ):
    '''simple docstring'''
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 391 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__magic_name__ = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__magic_name__ )
class lowercase ( PretrainedConfig ):
'''simple docstring'''
    model_type = """rag"""
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ) -> str:
        """simple docstring"""
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''' )
        question_encoder_model_type = question_encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''generator''' )
        decoder_model_type = decoder_config.pop('''model_type''' )

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , '''forced_eos_token_id''' , None )
@classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config , generator_config , **kwargs ) -> PretrainedConfig:
        """simple docstring"""
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ) -> List[Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 391 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__: int = logging.get_logger(__name__)
__magic_name__: Optional[int] = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class snake_case__ ( PretrainedConfig ):
    model_type = """lxmert"""
    attribute_map = {}

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_attention_heads=12 , num_qa_labels=95_00 , num_object_labels=16_00 , num_attr_labels=4_00 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=20_48 , visual_pos_dim=4 , visual_loss_normalizer=6.6_7 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ) -> List[str]:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs )
| 324 |
"""simple docstring"""
def valid_coloring( neighbours: list[int] , colored_vertices: list[int] , color: int ):
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )


def util_color( graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ):
    """simple docstring"""
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color( graph: list[list[int]] , max_colors: int ):
    """simple docstring"""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
return []
| 480 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class _lowerCamelCase ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_55 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        crop_size = get_size_dict(crop_size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def _lowerCAmelCase ( self : Dict , UpperCamelCase : ImageInput , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[float] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Dict , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : int = size if size is not None else self.size
lowerCAmelCase__ : Any = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
lowerCAmelCase__ : List[str] = resample if resample is not None else self.resample
lowerCAmelCase__ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : List[str] = get_size_dict(UpperCamelCase )
lowerCAmelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : List[str] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : Optional[Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : List[Any] = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase__ : List[Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
lowerCAmelCase__ : str = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ : Any = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ : int = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
lowerCAmelCase__ : Optional[Any] = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
lowerCAmelCase__ : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
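    # --- Added note (hedged): `preprocess` above applies the helpers in a fixed
    # order: resize -> center-crop -> rescale -> normalize, then converts each
    # image to the requested channel-dimension format and wraps the result in a
    # BatchFeature. A minimal NumPy sketch of the rescale/normalize steps only;
    # the shapes and constants below are illustrative assumptions, not values
    # taken from this class:
    #
    #     import numpy as np
    #     image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
    #     image = image * (1 / 255)                     # rescale
    #     mean = np.array([0.485, 0.456, 0.406])
    #     std = np.array([0.229, 0.224, 0.225])
    #     image = (image - mean) / std                  # normalize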
| 709 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m: nn.Module, inputs: Tensor, outputs: Tensor) -> None:
        """simple docstring"""
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
def __call__( self : int , UpperCamelCase : Tensor ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase )
[x.remove() for x in self.handles]
return self
@property
    def parametrized(self) -> List[nn.Module]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
def __call__( self : str , UpperCamelCase : Tensor ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Tracker(self.dest )(UpperCamelCase ).parametrized
lowerCAmelCase__ : Union[str, Any] = Tracker(self.src )(UpperCamelCase ).parametrized
lowerCAmelCase__ : Any = list(filter(lambda UpperCamelCase : type(UpperCamelCase ) not in self.src_skip , UpperCamelCase ) )
lowerCAmelCase__ : int = list(filter(lambda UpperCamelCase : type(UpperCamelCase ) not in self.dest_skip , UpperCamelCase ) )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(UpperCamelCase )} operations while"""
f""" destination module has {len(UpperCamelCase )}.""" )
for dest_m, src_m in zip(UpperCamelCase , UpperCamelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> List[str]:
print(f"""Converting {name}...""" )
with torch.no_grad():
lowerCAmelCase__ : Any = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
lowerCAmelCase__ : int = ResNetForImageClassification(__UpperCAmelCase ).eval()
lowerCAmelCase__ : List[str] = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
lowerCAmelCase__ : str = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
lowerCAmelCase__ : int = f"""resnet{'-'.join(name.split('resnet' ) )}"""
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
lowerCAmelCase__ : Tuple = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__UpperCAmelCase , )
print(f"""Pushed {checkpoint_name}""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> List[str]:
lowerCAmelCase__ : Dict = """imagenet-1k-id2label.json"""
lowerCAmelCase__ : Any = 1000
lowerCAmelCase__ : Optional[int] = (1, num_labels)
lowerCAmelCase__ : List[Any] = """huggingface/label-files"""
lowerCAmelCase__ : int = num_labels
lowerCAmelCase__ : Any = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : Optional[Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[int] = idalabel
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
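# --- Added usage note (hedged): a typical invocation of this conversion script,
# based only on the argparse flags defined above; the script name and paths are
# illustrative assumptions.
#
#   python <this_script>.py --model_name resnet50 --pytorch_dump_folder_path ./converted-resnet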
| 507 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
a__ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : int) -> None:
"""simple docstring"""
warnings.warn(
"""The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DPTImageProcessor instead.""" , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
| 477 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    '''simple docstring'''
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
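# --- Added usage sketch (hedged): a tiny self-check for the quickselect helper
# above. The names below are illustrative assumptions; call _demo_kth_number()
# manually (it is not executed on import).
def _demo_kth_number() -> None:
    sample = [7, 2, 9, 4, 1]
    # k is 1-indexed: the 3rd smallest of [1, 2, 4, 7, 9] is 4.
    assert kth_number(sample, 3) == 4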
| 573 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """simple docstring"""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            f"""{rows}x{columns} array:\n{table}"""
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
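# --- Added usage sketch (hedged): reconstructs a small matrix from its LU
# factors to sanity-check the decomposition above. The example matrix is an
# illustrative assumption; call _demo_lu() manually (not executed on import).
def _demo_lu() -> None:
    matrix = np.array([[2.0, 1.0], [4.0, 3.0]])
    lower, upper = lower_upper_decomposition(matrix)
    # For this matrix, lower = [[1, 0], [2, 1]] and upper = [[2, 1], [0, 1]].
    assert np.allclose(lower @ upper, matrix)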
| 167 |
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 167 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='ViTImageProcessor'
lowerCamelCase__ =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[int] , a : List[Any]=None , a : str=None , **a : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE : Dict = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Optional[int]=None , a : Tuple=None , a : List[Any]=None , **a : str ) -> Optional[int]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(a , return_tensors=a , **a )
if visual_prompt is not None:
SCREAMING_SNAKE_CASE : List[str] = self.image_processor(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE : int = self.image_processor(a , return_tensors=a , **a )
if visual_prompt is not None and images is not None:
SCREAMING_SNAKE_CASE : List[Any] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
SCREAMING_SNAKE_CASE : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
SCREAMING_SNAKE_CASE : Any = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def __UpperCamelCase ( self : Optional[Any] , *a : int , **a : List[str] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def __UpperCamelCase ( self : Optional[int] , *a : Any , **a : List[str] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor | 25 |
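# --- Added usage note (hedged): the processor's __call__ above accepts `text`,
# `visual_prompt`, or `images` (text and visual_prompt are mutually exclusive)
# and returns a BatchEncoding containing `pixel_values` and, when a visual
# prompt is given, `conditional_pixel_values`. The class and argument values
# below are illustrative assumptions, not names taken from this file.
#
#     processor = SomeSegmentationProcessor(image_processor=..., tokenizer=...)
#     encoding = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")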
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(1_25.50, 0.05) = }""")
| 510 | 0 |
a__ = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a__ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 198 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case :
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Tuple=13 , lowerCAmelCase : Union[str, Any]=7 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Any=False , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Optional[int]=99 , lowerCAmelCase : List[str]=32 , lowerCAmelCase : List[str]=5 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : Optional[int]=37 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Tuple=512 , lowerCAmelCase : Tuple=16 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : str=0.02 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Any=None , ) -> Optional[int]:
"""simple docstring"""
_snake_case : Union[str, Any] = parent
_snake_case : Optional[int] = batch_size
_snake_case : str = seq_length
_snake_case : int = is_training
_snake_case : Dict = use_input_mask
_snake_case : Tuple = use_token_type_ids
_snake_case : List[Any] = use_labels
_snake_case : List[str] = vocab_size
_snake_case : str = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : str = type_vocab_size
_snake_case : List[str] = type_sequence_label_size
_snake_case : Optional[int] = initializer_range
_snake_case : Any = num_labels
_snake_case : Dict = num_choices
_snake_case : List[Any] = scope
def UpperCamelCase_ ( self : Optional[int]) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_snake_case : Optional[Any] = None
if self.use_input_mask:
_snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
_snake_case : List[Any] = None
if self.use_token_type_ids:
_snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_snake_case : Union[str, Any] = None
_snake_case : Union[str, Any] = None
_snake_case : Optional[Any] = None
if self.use_labels:
_snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_snake_case : int = ids_tensor([self.batch_size] , self.num_choices)
_snake_case : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : List[Any]) -> Any:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
_snake_case : List[str] = LlamaModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : int = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
_snake_case : str = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Dict:
"""simple docstring"""
_snake_case : Optional[Any] = True
_snake_case : List[Any] = LlamaModel(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : Union[str, Any] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , )
_snake_case : Dict = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , )
_snake_case : Tuple = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , ) -> Dict:
"""simple docstring"""
_snake_case : List[str] = LlamaForCausalLM(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : int = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , ) -> Tuple:
"""simple docstring"""
_snake_case : str = True
_snake_case : Optional[Any] = True
_snake_case : List[Any] = LlamaForCausalLM(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
# first forward pass
_snake_case : Tuple = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase , )
_snake_case : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_snake_case : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size)
_snake_case : str = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and attention mask
_snake_case : int = torch.cat([input_ids, next_tokens] , dim=-1)
_snake_case : Tuple = torch.cat([input_mask, next_mask] , dim=-1)
_snake_case : Tuple = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )["""hidden_states"""][0]
_snake_case : Optional[Any] = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )["""hidden_states"""][0]
# select random slice
_snake_case : str = ids_tensor((1,) , output_from_past.shape[-1]).item()
_snake_case : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3))
def UpperCamelCase_ ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : int = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
snake_case_ : Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
snake_case_ : int = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ : Dict = False
snake_case_ : int = False
def UpperCamelCase_ ( self : Tuple) -> int:
"""simple docstring"""
_snake_case : int = LlamaModelTester(self)
_snake_case : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37)
def UpperCamelCase_ ( self : Union[str, Any]) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCamelCase_ ( self : List[str]) -> List[str]:
"""simple docstring"""
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case : List[Any] = type
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCamelCase_ ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : List[str] = 3
_snake_case : Any = input_dict["""input_ids"""]
_snake_case : Optional[int] = input_ids.ne(1).to(lowerCAmelCase)
_snake_case : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_snake_case : Tuple = LlamaForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : Dict = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCamelCase_ ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = 3
_snake_case : Optional[Any] = """single_label_classification"""
_snake_case : Optional[int] = input_dict["""input_ids"""]
_snake_case : str = input_ids.ne(1).to(lowerCAmelCase)
_snake_case : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_snake_case : str = LlamaForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : int = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCamelCase_ ( self : int) -> str:
"""simple docstring"""
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : List[Any] = 3
_snake_case : Any = """multi_label_classification"""
_snake_case : str = input_dict["""input_ids"""]
_snake_case : List[str] = input_ids.ne(1).to(lowerCAmelCase)
_snake_case : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
_snake_case : Tuple = LlamaForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : int = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""")
def UpperCamelCase_ ( self : Any) -> int:
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)])
def UpperCamelCase_ ( self : int , lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = ids_tensor([1, 10] , config.vocab_size)
_snake_case : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
_snake_case : Dict = LlamaModel(lowerCAmelCase)
original_model.to(lowerCAmelCase)
original_model.eval()
_snake_case : int = original_model(lowerCAmelCase).last_hidden_state
_snake_case : str = original_model(lowerCAmelCase).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
_snake_case : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
_snake_case : Dict = LlamaModel(lowerCAmelCase)
scaled_model.to(lowerCAmelCase)
scaled_model.eval()
_snake_case : str = scaled_model(lowerCAmelCase).last_hidden_state
_snake_case : List[Any] = scaled_model(lowerCAmelCase).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
else:
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""")
@slow
def UpperCamelCase_ ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_snake_case : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_snake_case : Any = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""")
_snake_case : Tuple = model(torch.tensor([input_ids]))
# Expected mean on dim = -1
_snake_case : Union[str, Any] = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]])
torch.testing.assert_close(out.mean(-1) , lowerCAmelCase , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case : Optional[Any] = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCAmelCase , atol=1E-5 , rtol=1E-5)
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""")
@slow
def UpperCamelCase_ ( self : List[str]) -> Dict:
"""simple docstring"""
_snake_case : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_snake_case : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""")
_snake_case : List[Any] = model(torch.tensor(lowerCAmelCase))
# Expected mean on dim = -1
_snake_case : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]])
torch.testing.assert_close(out.mean(-1) , lowerCAmelCase , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case : List[str] = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCAmelCase , atol=1E-5 , rtol=1E-5)
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""")
@slow
def UpperCamelCase_ ( self : List[str]) -> List[str]:
"""simple docstring"""
_snake_case : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_snake_case : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""")
_snake_case : int = model(torch.tensor(lowerCAmelCase))
# Expected mean on dim = -1
_snake_case : List[Any] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]])
torch.testing.assert_close(out.mean(-1) , lowerCAmelCase , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case : Any = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513])
# fmt: on
torch.testing.assert_close(out.mean(-1) , lowerCAmelCase , atol=1E-2 , rtol=1E-2)
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""")
@slow
def UpperCamelCase_ ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_snake_case : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""")
_snake_case : int = model(torch.tensor(lowerCAmelCase))
_snake_case : Any = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.float32)
torch.testing.assert_close(out.mean(-1) , lowerCAmelCase , atol=1E-2 , rtol=1E-2)
# fmt: off
_snake_case : Dict = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCAmelCase , atol=1E-5 , rtol=1E-5)
@unittest.skip("""Model is curently gated""")
@slow
def UpperCamelCase_ ( self : Tuple) -> Any:
"""simple docstring"""
_snake_case : List[Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
_snake_case : Optional[Any] = """Simply put, the theory of relativity states that """
_snake_case : Union[str, Any] = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""")
_snake_case : Dict = tokenizer.encode(lowerCAmelCase , return_tensors="""pt""")
_snake_case : Optional[Any] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=lowerCAmelCase)
# greedy generation outputs
_snake_case : Optional[int] = model.generate(lowerCAmelCase , max_new_tokens=64 , top_p=lowerCAmelCase , temperature=1 , do_sample=lowerCAmelCase)
_snake_case : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
| 198 | 1 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ):
# Load checkpoint
lowerCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCAmelCase = chkpt['model']
# We have the base model one level deeper than the original XLM repository
lowerCAmelCase = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowerCAmelCase = v
else:
lowerCAmelCase = v
lowerCAmelCase = chkpt['params']
lowerCAmelCase = {n: v for n, v in config.items() if not isinstance(_UpperCAmelCase , (torch.FloatTensor, numpy.ndarray) )}
lowerCAmelCase = chkpt['dico_word2id']
lowerCAmelCase = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
lowerCAmelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCAmelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCAmelCase = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCAmelCase , indent=2 ) + '\n' )
print(F'Save vocab file to {pytorch_config_dump_path}' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCAmelCase , indent=2 ) + '\n' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
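# --- Added usage note (hedged): example invocation based only on the argparse
# flags defined above; the script name and paths are illustrative assumptions.
#
#   python <this_script>.py \
#       --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#       --pytorch_dump_folder_path ./converted-xlm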
| 4 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __UpperCamelCase ( _lowerCAmelCase ):
    # to overwrite in feature-extractor-specific tests
__snake_case :Optional[int] = None
__snake_case :Dict = None
@property
def _a ( self : str ) -> List[str]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """feature_size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """padding_value""" ) )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _a ( self : str , _lowerCAmelCase : List[Any]=False ) -> int:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : int ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = self.feat_extract_tester.seq_length_diff
__lowercase = self.feat_extract_tester.max_seq_length + pad_diff
__lowercase = self.feat_extract_tester.min_seq_length
__lowercase = self.feat_extract_tester.batch_size
__lowercase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , padding=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
__lowercase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" )[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
self.assertTrue(all(len(_lowerCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowercase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _a ( self : Tuple , _lowerCAmelCase : str=False ) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : Tuple ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Any , _lowerCAmelCase : str ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to smallest with np
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to middle
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , truncation=_lowerCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowercase = 12
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , )
__lowercase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowercase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowercase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
def _a ( self : str ) -> str:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
@require_torch
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = min(_lowerCAmelCase )
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 80 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowerCAmelCase :
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int]=13 , A : int=30 , A : Union[str, Any]=2 , A : Dict=3 , A : Optional[int]=True , A : Optional[int]=True , A : Dict=32 , A : List[str]=5 , A : str=4 , A : List[str]=37 , A : Union[str, Any]="gelu" , A : Tuple=0.1 , A : Optional[int]=0.1 , A : List[str]=10 , A : List[str]=0.0_2 , A : Any=3 , A : Any=None , A : int=2 , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
_UpperCAmelCase = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 2
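        # Added worked example (hedged): with this tester's defaults
        # (image_size=30, patch_size=2), num_patches = (30 // 2) ** 2 = 225,
        # so seq_length = 225 + 2 = 227.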
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : Optional[Any] , A : List[Any] , A : Optional[int] , A : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = DeiTModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : int , A : Union[str, Any] , A : Tuple , A : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = DeiTForMaskedImageModeling(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = DeiTForMaskedImageModeling(A)
model.to(A)
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any] , A : Union[str, Any] , A : str) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = DeiTForImageClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = DeiTForImageClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear))
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A)
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A)
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : str=False) -> Dict:
"""simple docstring"""
_UpperCAmelCase = super()._prepare_for_class(A , A , return_labels=A)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
_UpperCAmelCase = model_class(A)
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
_UpperCAmelCase = model(**A).loss
loss.backward()
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_UpperCAmelCase = False
_UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(A) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_UpperCAmelCase = model_class(A)
model.gradient_checkpointing_enable()
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
_UpperCAmelCase = model(**A).loss
loss.backward()
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A),
*get_values(A),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}"):
_UpperCAmelCase = problem_type['title']
_UpperCAmelCase = problem_type['num_labels']
_UpperCAmelCase = model_class(A)
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
if problem_type["num_labels"] > 1:
_UpperCAmelCase = inputs['labels'].unsqueeze(1).repeat(1 , problem_type['num_labels'])
_UpperCAmelCase = inputs['labels'].to(problem_type['dtype'])
                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
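                    # Illustrative shapes (added for clarity, not from the original test): in the regression
                    # case num_labels == 1, so the logits come out as (batch_size, 1) while the float labels
                    # are (batch_size,); if the model did not align them, MSELoss would broadcast and emit
                    # exactly that warning.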
with warnings.catch_warnings(record=A) as warning_list:
_UpperCAmelCase = model(**A).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}")
loss.backward()
@slow
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DeiTModel.from_pretrained(A)
self.assertIsNotNone(A)
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Any) -> int:
"""simple docstring"""
_UpperCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
A)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A)
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A)
# verify the logits
_UpperCAmelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto')
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = inputs.pixel_values.to(A)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCAmelCase = model(A)
| 639 |
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
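# Illustrative behaviour (added; not part of the original script):
#   gnome_sort([34, 2, 10, -9])  ->  [-9, 2, 10, 34]
#   gnome_sort([])               ->  []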
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 639 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def replace_key_with_offset(key: str, offset: int, original_name: str, new_name: str) -> str:
    """Rename a layer key, subtracting ``offset`` from the original block number."""
    to_find = original_name.split("." )[0]
    key_list = key.split("." )
    orig_block_num = int(key_list[key_list.index(to_find) - 2] )
    layer_num = int(key_list[key_list.index(to_find) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
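# Illustration (added; hypothetical key, not from the original script): with offset=1,
# replace_key_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# returns "poolformer.encoder.block.1.3.output.conv1.weight".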
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = OrderedDict()
UpperCAmelCase_ , UpperCAmelCase_ : Any = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
UpperCAmelCase_ : Dict = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
UpperCAmelCase_ : List[str] = key[: key.find("proj" )]
UpperCAmelCase_ : List[Any] = key.replace(_SCREAMING_SNAKE_CASE , F'''patch_embeddings.{total_embed_found}.''' )
UpperCAmelCase_ : List[str] = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
UpperCAmelCase_ : str = "poolformer.encoder." + key
if "mlp.fc1" in key:
UpperCAmelCase_ : Dict = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
UpperCAmelCase_ : int = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
UpperCAmelCase_ : Optional[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "norm1" , "before_norm" )
if "norm2" in key:
UpperCAmelCase_ : Optional[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "norm2" , "after_norm" )
if "layer_scale_1" in key:
UpperCAmelCase_ : List[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
UpperCAmelCase_ : List[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
UpperCAmelCase_ : List[Any] = key.replace("head" , "classifier" )
UpperCAmelCase_ : Any = value
return new_state_dict
def a__ ( ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = PoolFormerConfig()
# set attributes based on model_name
UpperCAmelCase_ : Any = "huggingface/label-files"
UpperCAmelCase_ : str = model_name[-3:]
UpperCAmelCase_ : Union[str, Any] = 10_00
UpperCAmelCase_ : Union[str, Any] = "imagenet-1k-id2label.json"
UpperCAmelCase_ : Tuple = (1, 10_00)
# set config attributes
UpperCAmelCase_ : int = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ : str = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
if size == "s12":
UpperCAmelCase_ : Tuple = [2, 2, 6, 2]
UpperCAmelCase_ : int = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : int = 4.0
UpperCAmelCase_ : Optional[int] = 0.9
elif size == "s24":
UpperCAmelCase_ : str = [4, 4, 12, 4]
UpperCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : List[Any] = 4.0
UpperCAmelCase_ : Optional[Any] = 0.9
elif size == "s36":
UpperCAmelCase_ : str = [6, 6, 18, 6]
UpperCAmelCase_ : str = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : Union[str, Any] = 4.0
UpperCAmelCase_ : Any = 1E-6
UpperCAmelCase_ : Any = 0.9
elif size == "m36":
UpperCAmelCase_ : int = [6, 6, 18, 6]
UpperCAmelCase_ : Any = [96, 1_92, 3_84, 7_68]
UpperCAmelCase_ : Union[str, Any] = 4.0
UpperCAmelCase_ : int = 1E-6
UpperCAmelCase_ : Optional[Any] = 0.95
elif size == "m48":
UpperCAmelCase_ : str = [8, 8, 24, 8]
UpperCAmelCase_ : Optional[int] = [96, 1_92, 3_84, 7_68]
UpperCAmelCase_ : str = 4.0
UpperCAmelCase_ : List[str] = 1E-6
UpperCAmelCase_ : Optional[int] = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
UpperCAmelCase_ : Dict = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
# Prepare image
UpperCAmelCase_ : Optional[Any] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
UpperCAmelCase_ : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )
# rename keys
UpperCAmelCase_ : Tuple = rename_keys(_SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
UpperCAmelCase_ : Optional[int] = PoolFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
UpperCAmelCase_ : List[str] = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = outputs.logits
# define expected logit slices for different models
if size == "s12":
UpperCAmelCase_ : Any = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
UpperCAmelCase_ : str = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
UpperCAmelCase_ : Dict = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
UpperCAmelCase_ : Optional[Any] = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
UpperCAmelCase_ : Any = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_lowerCamelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 71 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of a normal distribution with mean ``mu`` and standard deviation ``sigma``."""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
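# Illustrative values (added; not part of the original module): gaussian(0) ≈ 0.3989 for the
# standard normal, and gaussian(2, 2, 3) ≈ 0.1330, since the density peaks at x == mu.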
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 592 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
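# Illustration (added; hypothetical tensors): a checkpoint containing {"lm_head.decoder.weight": W, ...}
# is rewritten to {"lm_head.weight": W, ...}, so the saved file uses the key layout that the
# transformers GPT-2 head expects.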
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 592 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=__lowerCamelCase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=__lowerCamelCase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=__lowerCamelCase , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=__lowerCamelCase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=__lowerCamelCase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=__lowerCamelCase , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=__lowerCamelCase , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=__lowerCamelCase , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=__lowerCamelCase , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=__lowerCamelCase , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=__lowerCamelCase , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=__lowerCamelCase , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=__lowerCamelCase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=__lowerCamelCase , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=__lowerCamelCase , help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
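# Example (added for clarity; hypothetical filename): a shard named "train-00005-02048.tfrecord"
# matches the regex above with group(1) == "02048", so it contributes 2048 samples to the total.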
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main(args):
if not args.no_tpu:
snake_case : Union[str, Any] = initialize_tpu(__lowerCamelCase )
snake_case : Dict = tf.distribute.TPUStrategy(__lowerCamelCase )
else:
snake_case : List[Any] = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
snake_case : List[str] = AutoTokenizer.from_pretrained(args.tokenizer )
snake_case : Tuple = AutoConfig.from_pretrained(args.pretrained_model_config )
snake_case : int = tokenizer.vocab_size
snake_case : List[Any] = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
snake_case : str = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
snake_case : Optional[Any] = count_samples(__lowerCamelCase )
snake_case : Tuple = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
snake_case : Tuple = steps_per_epoch * args.num_epochs
with strategy.scope():
snake_case : Optional[int] = TFAutoModelForMaskedLM.from_config(__lowerCamelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
snake_case , snake_case : str = create_optimizer(
num_train_steps=__lowerCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__lowerCamelCase , metrics=["accuracy"] )
def decode_fn(__lowerCamelCase : Optional[Any] ):
snake_case : Dict = {
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__lowerCamelCase , __lowerCamelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
snake_case : Optional[int] = DataCollatorForLanguageModeling(
tokenizer=__lowerCamelCase , mlm_probability=args.mlm_probability , mlm=__lowerCamelCase , return_tensors="tf" )
def mask_with_collator(__lowerCamelCase : str ):
# TF really needs an isin() function
snake_case : Dict = (
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
snake_case , snake_case : Union[str, Any] = data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(__lowerCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__lowerCamelCase , )
return batch
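    # Illustration (added; assumed token layout): for a row [CLS, tok, tok, SEP, PAD, PAD] the
    # special_tokens_mask computed above is [1, 0, 0, 1, 1, 1], so only the two interior tokens are
    # candidates for MLM masking in tf_mask_tokens.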
snake_case : int = args.per_replica_batch_size * strategy.num_replicas_in_sync
snake_case : List[Any] = prepare_dataset(
__lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
snake_case : List[Any] = prepare_dataset(
__lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , )
snake_case : Tuple = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__lowerCamelCase ) )
model.fit(
__lowerCamelCase , validation_data=__lowerCamelCase , epochs=args.num_epochs , callbacks=__lowerCamelCase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowerCamelCase = parse_args()
main(args)
| 204 |
'''simple docstring'''
import os
import sys
import unittest
_a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 502 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
"""simple docstring"""
def __init__( self : Any , snake_case_ : str , snake_case_ : Optional[Any]=1_3 , snake_case_ : int=7 , snake_case_ : int=True , snake_case_ : Optional[Any]=True , snake_case_ : Dict=True , snake_case_ : int=True , snake_case_ : Optional[Any]=9_9 , snake_case_ : int=6_4 , snake_case_ : Dict=5 , snake_case_ : List[Any]=4 , snake_case_ : Union[str, Any]=3_7 , snake_case_ : Dict="gelu" , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Dict=0.1 , snake_case_ : Any=5_1_2 , snake_case_ : Any=1_6 , snake_case_ : Any=2 , snake_case_ : Dict=0.0_2 , snake_case_ : List[str]=3 , snake_case_ : Optional[int]=4 , snake_case_ : str=None , ):
'''simple docstring'''
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : Dict = seq_length
snake_case__ : int = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : Optional[Any] = use_token_type_ids
snake_case__ : Dict = use_labels
snake_case__ : int = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : Union[str, Any] = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : Optional[int] = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : Any = type_sequence_label_size
snake_case__ : str = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : Dict = num_choices
snake_case__ : Union[str, Any] = scope
snake_case__ : List[Any] = vocab_size - 1
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Optional[int] = None
if self.use_input_mask:
snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ : List[str] = True
return config, input_ids, input_mask, token_labels
def __magic_name__ ( self : Tuple , snake_case_ : Any , snake_case_ : str , snake_case_ : str ):
'''simple docstring'''
snake_case__ : Any = GPTNeoXModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ )
snake_case__ : int = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = True
snake_case__ : Tuple = GPTNeoXModel(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Dict = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Any , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = GPTNeoXForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Tuple = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Optional[int] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
'''simple docstring'''
snake_case__ : Dict = self.num_labels
snake_case__ : List[Any] = GPTNeoXForQuestionAnswering(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Any = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : List[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Dict ):
'''simple docstring'''
snake_case__ : str = self.num_labels
snake_case__ : List[str] = GPTNeoXForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Optional[int] ):
'''simple docstring'''
snake_case__ : Any = self.num_labels
snake_case__ : Any = GPTNeoXForTokenClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
'''simple docstring'''
snake_case__ : Optional[Any] = True
snake_case__ : Union[str, Any] = GPTNeoXForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
snake_case__ : int = model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
snake_case__ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , output_hidden_states=snake_case_ )
snake_case__ : Union[str, Any] = output_from_no_past['''hidden_states'''][0]
snake_case__ : str = model(
snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
# select random slice
snake_case__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = config_and_inputs
snake_case__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def __magic_name__ ( self : int ):
'''simple docstring'''
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case__ : Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*snake_case_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __magic_name__ ( self : Optional[int] , snake_case_ : Optional[Any] ):
'''simple docstring'''
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = ids_tensor([1, 1_0] , config.vocab_size )
snake_case__ : List[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Tuple = GPTNeoXModel(snake_case_ )
original_model.to(snake_case_ )
original_model.eval()
snake_case__ : Any = original_model(snake_case_ ).last_hidden_state
snake_case__ : List[str] = original_model(snake_case_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Optional[Any] = {'''type''': scaling_type, '''factor''': 1_0.0}
snake_case__ : Optional[Any] = GPTNeoXModel(snake_case_ )
scaled_model.to(snake_case_ )
scaled_model.eval()
snake_case__ : Optional[int] = scaled_model(snake_case_ ).last_hidden_state
snake_case__ : List[str] = scaled_model(snake_case_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
@slow
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
snake_case__ : Dict = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
snake_case__ : str = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(snake_case_ )
snake_case__ : Tuple = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(snake_case_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case__ : List[str] = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
snake_case__ : Optional[int] = model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=2_0 )
snake_case__ : Tuple = tokenizer.batch_decode(snake_case_ )[0]
self.assertEqual(snake_case_ , snake_case_ )
| 502 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features: Features) -> "AudioClassification":
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
} | 91 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a positive integer whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    from doctest import testmod
    testmod(verbose=True)
    print(f'''{ugly_numbers(200) = }''')
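# Illustrative sequence (added; not part of the original script): the first ten ugly numbers are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) returns 12.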
| 143 | 0 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of '0'/'1' bits."""
    result = ''''''
    try:
        with open(file_path , '''rb''' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with the Lempel-Ziv-Welch scheme used by the matching compressor."""
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result, curr_string = '''''', ''''''
    index = len(lexicon)
    for i in range(len(data_bits) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '''0'''
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['''0''' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + '''1'''
        index += 1
        curr_string = ''''''
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file as bytes."""
    byte_length = 8
    try:
        with open(file_path , '''wb''' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write) , byte_length)
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('''10000000''' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='''big''' ) )
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that the compressed file starts with."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read the compressed source file, decompress it and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path , decompressed)
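# Note (added; summary of the decoder above): each matched code stores two new dictionary values,
# last_match_id + "0" and last_match_id + "1", and whenever the index reaches a power of two every
# existing key is re-prefixed with "0" so that code words grow by one bit.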
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 716 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _SCREAMING_SNAKE_CASE( snake_case_ : Optional[Any]=32 , snake_case_ : List[str]=10 , snake_case_ : Any=1_00 , snake_case_ : List[str]=10_26 , snake_case_ : Dict=True , snake_case_ : Any="data/tokenized_stories_train_wikitext103.jbl" , snake_case_ : Any="igf_context_pairs.jbl" , ) ->List[Any]:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
_lowercase , _lowercase : List[Any] = generate_datasets(
snake_case_ , snake_case_ , number=snake_case_ , min_len=10_26 , trim=snake_case_ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_lowercase : int = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
_lowercase : Dict = load_gpta('''gpt2''' ).to(snake_case_ )
print('''computing perplexity on objective set''' )
_lowercase : Any = compute_perplexity(snake_case_ , snake_case_ , snake_case_ ).item()
print('''perplexity on objective set:''' , snake_case_ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE( snake_case_ : str , snake_case_ : Optional[Any]=15 , snake_case_ : Dict=1_28 , snake_case_ : Tuple=1_00 , snake_case_ : Union[str, Any]="igf_model.pt" , ) ->List[Any]:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
_lowercase : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
_lowercase : List[Any] = SecondaryLearner(snake_case_ )
# Train secondary learner
_lowercase : Any = train_secondary_learner(
snake_case_ , snake_case_ , max_epochs=snake_case_ , batch_size=snake_case_ , eval_freq=1_00 , igf_model_path=snake_case_ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : List[Any]=32 , snake_case_ : List[Any]=10_00 , snake_case_ : List[str]=16 , snake_case_ : List[Any]=1.0 , snake_case_ : Optional[Any]=recopy_gpta , snake_case_ : Optional[int]=None , snake_case_ : List[Any]=10 , snake_case_ : Optional[Any]="gpt2_finetuned.pt" , ) ->List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
_lowercase : Optional[int] = RandomSampler(snake_case_ )
_lowercase : Union[str, Any] = DataLoader(snake_case_ , sampler=snake_case_ )
_lowercase : str = max_steps // (len(snake_case_ )) + 1
_lowercase : Optional[int] = 0
_lowercase : Optional[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=snake_case_ )
_lowercase , _lowercase , _lowercase : int = recopy_model(snake_case_ , snake_case_ , snake_case_ )
model.train()
if secondary_learner is not None:
secondary_learner.to(snake_case_ )
secondary_learner.eval()
_lowercase : List[Any] = []
_lowercase : Optional[int] = 0
_lowercase : List[Any] = []
_lowercase : Any = []
# Compute the performance of the transformer model at the beginning
_lowercase : Tuple = compute_perplexity(snake_case_ , snake_case_ , snake_case_ )
test_perps.append(snake_case_ )
print('''Test perplexity, step''' , snake_case_ , ''':''' , snake_case_ )
for epoch in range(int(snake_case_ ) ):
for step, example in enumerate(snake_case_ ):
torch.cuda.empty_cache()
_lowercase : int = random.randint(0 , example.size(2 ) - context_len - 1 )
_lowercase : Tuple = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_lowercase : Any = model(snake_case_ , labels=snake_case_ )
_lowercase : int = True
if secondary_learner is not None:
_lowercase : List[Any] = secondary_learner.forward(
torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(snake_case_ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_lowercase : Any = -1
if predicted_q < threshold:
_lowercase : List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_lowercase : Dict = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_lowercase : Optional[Any] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_lowercase : Union[str, Any] = compute_perplexity(snake_case_ , snake_case_ , snake_case_ )
test_perps.append(snake_case_ )
print('''Test perplexity, step''' , snake_case_ , ''':''' , snake_case_ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , snake_case_ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _SCREAMING_SNAKE_CASE( ) ->List[Any]:
'''simple docstring'''
_lowercase : Tuple = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=snake_case_ , default=snake_case_ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=snake_case_ , default=snake_case_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=snake_case_ , type=snake_case_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=snake_case_ , default=snake_case_ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=snake_case_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=snake_case_ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=snake_case_ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=10_00 , type=snake_case_ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=snake_case_ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=snake_case_ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=snake_case_ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=snake_case_ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=10_26 , type=snake_case_ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=snake_case_ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=snake_case_ , type=snake_case_ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=snake_case_ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=snake_case_ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=snake_case_ , type=snake_case_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
    # Collect *n* (context, information gain) pairs, i.e. (X, IG(X)), for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=snake_case_ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
_lowercase : List[str] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
_lowercase : Optional[int] = training_secondary_learner(
snake_case_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
_lowercase : Dict = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_lowercase , _lowercase : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=snake_case_ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
snake_case_ , snake_case_ , snake_case_ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=snake_case_ , secondary_learner=snake_case_ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 411 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
for e in env_keys:
_lowerCAmelCase = int(os.environ.get(__lowerCamelCase, -1 ) )
if val >= 0:
return val
return default
def A__ ( __lowerCamelCase, __lowerCamelCase=False ):
"""simple docstring"""
_lowerCAmelCase = os.environ.get(__lowerCamelCase, str(__lowerCamelCase ) )
return strtobool(__lowerCamelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def A__ ( __lowerCamelCase, __lowerCamelCase="no" ):
"""simple docstring"""
_lowerCAmelCase = os.environ.get(__lowerCamelCase, str(__lowerCamelCase ) )
return value
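

# A minimal usage sketch of the helpers' pattern above (not part of the original
# file): read an integer, a boolean flag and a plain string setting from the
# environment with defaults. The DEMO_* variable names are illustrative only and
# rely on the `os` / `strtobool` imports at the top of this file.
if __name__ == "__main__":
    os.environ.setdefault("DEMO_NUM_WORKERS", "8")
    num_workers = int(os.environ.get("DEMO_NUM_WORKERS", "-1"))
    run_slow = strtobool(os.environ.get("DEMO_RUN_SLOW", "False")) == 1  # "1"/"true"/"yes" map to True
    device_mode = os.environ.get("DEMO_DEVICE", "no")
    print(num_workers, run_slow, device_mode)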
| 589 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : List[str] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
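# Illustrative note (not part of the original file): because of the _LazyModule
# indirection above, a submodule is only imported the first time one of its
# names is accessed, e.g.
#   from transformers.models.distilbert import DistilBertConfig   # loads configuration_distilbert only
#   from transformers.models.distilbert import DistilBertModel    # only now is modeling_distilbert (and torch) pulled in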
| 589 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ort.SessionOptions()
UpperCamelCase = False
return options
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
UpperCamelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase = """A red cat sitting on a park bench"""
UpperCamelCase = np.random.RandomState(0 )
UpperCamelCase = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=__magic_name__ , output_type="""np""" , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 703 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase :
def __init__( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : str=1_3 , __magic_name__ : str=7 , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=9_9 , __magic_name__ : List[str]=3_2 , __magic_name__ : List[str]=5 , __magic_name__ : int=4 , __magic_name__ : Union[str, Any]=3_7 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[Any]=5_0 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = use_labels
UpperCamelCase = scope
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = self.prepare_config_and_inputs()
UpperCamelCase = True
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase_ ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : List[str] , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoder(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCamelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = BertGenerationEncoder(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Dict , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = BertGenerationDecoder(config=__magic_name__ ).to(__magic_name__ ).eval()
# first forward pass
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , use_cache=__magic_name__ , )
UpperCamelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0]
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
def lowerCamelCase_ ( self : int , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , *__magic_name__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = BertGenerationDecoder(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoderTester(self )
UpperCamelCase = ConfigTester(self , config_class=__magic_name__ , hidden_size=3_7 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
UpperCamelCase = """bert"""
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__magic_name__ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__magic_name__ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase = None
self.model_tester.create_and_check_model_as_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__magic_name__ )
@slow
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(__magic_name__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
UpperCamelCase = model(__magic_name__ )[0]
UpperCamelCase = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , __magic_name__ )
UpperCamelCase = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1e-4 ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
UpperCamelCase = model(__magic_name__ )[0]
UpperCamelCase = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , __magic_name__ )
UpperCamelCase = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1e-4 ) )
| 181 | 0 |
"""simple docstring"""
def _UpperCamelCase ( numerator = 1 , digit = 1000 ) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
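

# Quick illustrative check (not part of the original file): among denominators
# up to 10, 1/7 produces the longest run of distinct remainders, so the search
# above should return 7.
assert _UpperCamelCase(1, 10) == 7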
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ = "CompVis/stable-diffusion-v1-4"
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , __a : bool = True , ) -> List[str]:
        super().__init__()
_UpperCamelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(__a )
_UpperCamelCase : int = StableDiffusionPipeline.from_pretrained(__a )
_UpperCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(__a )
_UpperCamelCase : int = StableDiffusionPipeline(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , requires_safety_checker=__a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict[str, Any]:
return {k: getattr(self , __a ) for k in self.config.keys() if not k.startswith("_" )}
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Optional[Union[str, int]] = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
self.enable_attention_slicing(__a )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Any , ) -> Optional[int]:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Tuple , ) -> str:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Tuple , ) -> Any:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : List[Any] , ) -> Union[str, Any]:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : str , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ) -> List[Any]:
_UpperCamelCase : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
self.to(__a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
_UpperCamelCase : List[str] = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
        # Get the result from Stable Diffusion Checkpoint v1.2
_UpperCamelCase : Optional[Any] = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
        # Get the result from Stable Diffusion Checkpoint v1.3
_UpperCamelCase : str = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
        # Get the result from Stable Diffusion Checkpoint v1.4
_UpperCamelCase : str = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
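

# Hypothetical usage sketch (not part of the original file); the custom pipeline
# name and prompt below are illustrative, and all four v1.x checkpoints must be
# downloadable for the comparison to run:
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   pipe.enable_attention_slicing()
#   images = pipe(prompt="an astronaut riding a horse").images  # one image per v1.1-v1.4 checkpoint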
| 624 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
_UpperCamelCase : Dict = '''xlm-prophetnet'''
_UpperCamelCase : Union[str, Any] = ['''past_key_values''']
_UpperCamelCase : Tuple = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : List[Any] , UpperCamelCase_ : Optional[float] = 0.1 , UpperCamelCase_ : Optional[Union[str, Callable]] = "gelu" , UpperCamelCase_ : Optional[int] = 30522 , UpperCamelCase_ : Optional[int] = 1024 , UpperCamelCase_ : Optional[int] = 4096 , UpperCamelCase_ : Optional[int] = 12 , UpperCamelCase_ : Optional[int] = 16 , UpperCamelCase_ : Optional[int] = 4096 , UpperCamelCase_ : Optional[int] = 12 , UpperCamelCase_ : Optional[int] = 16 , UpperCamelCase_ : Optional[float] = 0.1 , UpperCamelCase_ : Optional[float] = 0.1 , UpperCamelCase_ : Optional[int] = 512 , UpperCamelCase_ : Optional[float] = 0.0_2 , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : Optional[int] = 0 , UpperCamelCase_ : Optional[int] = 2 , UpperCamelCase_ : Optional[int] = 32 , UpperCamelCase_ : Optional[int] = 128 , UpperCamelCase_ : Optional[bool] = False , UpperCamelCase_ : Optional[float] = 0.0 , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : Optional[int] = 0 , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : Optional[int] = 2 , **UpperCamelCase_ : Dict , ):
lowerCAmelCase_ : Optional[Any] =vocab_size
lowerCAmelCase_ : List[str] =hidden_size
lowerCAmelCase_ : int =encoder_ffn_dim
lowerCAmelCase_ : Dict =num_encoder_layers
lowerCAmelCase_ : Union[str, Any] =num_encoder_attention_heads
lowerCAmelCase_ : Optional[Any] =decoder_ffn_dim
lowerCAmelCase_ : Optional[int] =num_decoder_layers
lowerCAmelCase_ : Tuple =num_decoder_attention_heads
lowerCAmelCase_ : int =max_position_embeddings
lowerCAmelCase_ : Any =init_std # Normal(0, this parameter)
lowerCAmelCase_ : Optional[int] =activation_function
# parameters for xlmprophetnet
lowerCAmelCase_ : int =ngram
lowerCAmelCase_ : Any =num_buckets
lowerCAmelCase_ : Tuple =relative_max_distance
lowerCAmelCase_ : Optional[Any] =disable_ngram_loss
lowerCAmelCase_ : Any =eps
# 3 Types of Dropout
lowerCAmelCase_ : Optional[Any] =attention_dropout
lowerCAmelCase_ : Dict =activation_dropout
lowerCAmelCase_ : Dict =dropout
lowerCAmelCase_ : int =use_cache
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , add_cross_attention=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
@property
def __A ( self : Tuple ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __A ( self : Tuple , UpperCamelCase_ : List[str] ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
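

# Illustrative instantiation (not part of the original file); the values shown
# are the defaults of the config class above, whose upstream name is
# XLMProphetNetConfig:
#   config = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
#   config.num_hidden_layers  # -> 24, i.e. num_encoder_layers + num_decoder_layers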
| 718 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase = logging.getLogger(__name__)
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase_ : List[Any]=-1 ):
        # in NER datasets, the last column is usually reserved for the NER label
lowerCAmelCase_ : Tuple =label_idx
def __A ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[Split, str] ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase_ : Any =mode.value
lowerCAmelCase_ : List[str] =os.path.join(UpperCamelCase_ , F'{mode}.txt' )
lowerCAmelCase_ : Tuple =1
lowerCAmelCase_ : Dict =[]
with open(UpperCamelCase_ , encoding='''utf-8''' ) as f:
lowerCAmelCase_ : Optional[Any] =[]
lowerCAmelCase_ : Optional[Any] =[]
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=UpperCamelCase_ , labels=UpperCamelCase_ ) )
guid_index += 1
lowerCAmelCase_ : Dict =[]
lowerCAmelCase_ : int =[]
else:
lowerCAmelCase_ : Tuple =line.split(''' ''' )
words.append(splits[0] )
if len(UpperCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=UpperCamelCase_ , labels=UpperCamelCase_ ) )
return examples
def __A ( self : List[str] , UpperCamelCase_ : TextIO , UpperCamelCase_ : TextIO , UpperCamelCase_ : List ):
lowerCAmelCase_ : Any =0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(UpperCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase_ : List[str] =line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(UpperCamelCase_ )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def __A ( self : int , UpperCamelCase_ : str ):
if path:
with open(UpperCamelCase_ , '''r''' ) as f:
lowerCAmelCase_ : int =f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ : str =['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] ):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def __A ( self : Optional[int] , UpperCamelCase_ : str ):
if path:
with open(UpperCamelCase_ , '''r''' ) as f:
lowerCAmelCase_ : Tuple =f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ : Optional[int] =['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
def __A ( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[Split, str] ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase_ : str =mode.value
lowerCAmelCase_ : Tuple =os.path.join(UpperCamelCase_ , F'{mode}.txt' )
lowerCAmelCase_ : Any =1
lowerCAmelCase_ : Union[str, Any] =[]
with open(UpperCamelCase_ , encoding='''utf-8''' ) as f:
for sentence in parse_incr(UpperCamelCase_ ):
lowerCAmelCase_ : int =[]
lowerCAmelCase_ : Tuple =[]
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=UpperCamelCase_ , labels=UpperCamelCase_ ) )
guid_index += 1
return examples
def __A ( self : Dict , UpperCamelCase_ : TextIO , UpperCamelCase_ : TextIO , UpperCamelCase_ : List ):
lowerCAmelCase_ : Optional[Any] =0
for sentence in parse_incr(UpperCamelCase_ ):
lowerCAmelCase_ : List[str] =preds_list[example_id]
lowerCAmelCase_ : str =''''''
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(UpperCamelCase_ )
example_id += 1
def __A ( self : Union[str, Any] , UpperCamelCase_ : str ):
if path:
with open(UpperCamelCase_ , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 305 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
_lowerCamelCase : Any = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
_lowerCamelCase : List[str] = model(__lowerCAmelCase )['last_hidden_state']
_lowerCamelCase : Tuple = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __lowerCAmelCase )
# compare the actual values for a slice.
_lowerCamelCase : Optional[Any] = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 83 |
import gc
import threading
import time
import psutil
import torch
class lowerCAmelCase :
def __init__( self : str ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = psutil.Process()
lowerCamelCase__ : Union[str, Any] = False
def A_ ( self : Optional[int] ) -> int:
lowerCamelCase__ : Optional[Any] = -1
while True:
lowerCamelCase__ : Dict = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def A_ ( self : Tuple ) -> Dict:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : List[str] = threading.Thread(target=self.peak_monitor )
lowerCamelCase__ : Union[str, Any] = True
self.thread.start()
def A_ ( self : str ) -> Dict:
lowerCamelCase__ : int = False
self.thread.join()
return self.cpu_memory_peak
_UpperCAmelCase : Dict = PeakCPUMemory()
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
# Time
lowerCamelCase__ : List[Any] = {'time': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowerCamelCase__ : List[str] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
lowerCamelCase__ : Union[str, Any] = torch.cuda.memory_allocated(_UpperCAmelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
# Time
lowerCamelCase__ : Optional[int] = {'time': time.time() - start_measures['time']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowerCamelCase__ : Dict = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
lowerCamelCase__ : int = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
lowerCamelCase__ : List[str] = (torch.cuda.memory_allocated(_UpperCAmelCase ) - start_measures[str(_UpperCAmelCase )]) / 2**20
lowerCamelCase__ : Optional[Any] = (torch.cuda.max_memory_allocated(_UpperCAmelCase ) - start_measures[str(_UpperCAmelCase )]) / 2**20
return measures
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(_UpperCAmelCase )]:.2f}MiB""" )
lowerCamelCase__ : List[str] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 295 | 0 |
lowerCAmelCase_ = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 703 |
from manim import *
class UpperCamelCase ( snake_case__ ):
"""simple docstring"""
def A( self : Dict ) -> Tuple:
'''simple docstring'''
A = Rectangle(height=0.5 ,width=0.5 )
A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
A = [mem.copy() for i in range(6 )]
A = [mem.copy() for i in range(6 )]
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = VGroup(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = Text('CPU' ,font_size=2_4 )
A = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
A = [mem.copy() for i in range(1 )]
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = Text('GPU' ,font_size=2_4 )
A = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.align_to(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(_SCREAMING_SNAKE_CASE )
A = [mem.copy() for i in range(6 )]
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = Text('Model' ,font_size=2_4 )
A = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,)
A = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' ,font_size=2_4 ,)
A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE ,run_time=2.5 ) ,Write(_SCREAMING_SNAKE_CASE ) ,Write(_SCREAMING_SNAKE_CASE ) )
self.add(_SCREAMING_SNAKE_CASE )
A = []
A = []
A = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 )
cpu_target.move_to(_SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
A = 0.46 / 4
A = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE ,run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
| 110 | 0 |
def snake_case (UpperCAmelCase__ ) -> bool:
UpperCamelCase_: Union[str, Any] = 0
for ch in input_str:
UpperCamelCase_: Optional[Any] = ord(_A )
UpperCamelCase_: str = pow(2 , _A )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
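

# Illustrative restatement of the bit-set idea above (not part of the original
# file): map each character to one bit and fail as soon as a bit repeats.
def _has_unique_chars(text: str) -> bool:
    seen = 0
    for ch in text:
        bit = 1 << ord(ch)
        if seen & bit:
            return False
        seen |= bit
    return True


assert _has_unique_chars("abcde") is True
assert _has_unique_chars("aabcd") is False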
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 57 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 551 | 0 |
'''simple docstring'''
import operator as op
snake_case_ = '''scaler.pt'''
snake_case_ = '''pytorch_model'''
snake_case_ = '''random_states'''
snake_case_ = '''optimizer'''
snake_case_ = '''scheduler'''
snake_case_ = '''pytorch_model.bin'''
snake_case_ = '''pytorch_model.bin.index.json'''
snake_case_ = '''model.safetensors'''
snake_case_ = '''model.safetensors.index.json'''
snake_case_ = '''1.10.2'''
snake_case_ = '''py38'''
snake_case_ = '''4.17.0'''
snake_case_ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
snake_case_ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
snake_case_ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
snake_case_ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
snake_case_ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
snake_case_ = '''2.0.1'''
snake_case_ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
snake_case_ = ['''default''', '''reduce-overhead''', '''max-autotune''']
snake_case_ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
snake_case_ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
snake_case_ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
snake_case_ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 715 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
snake_case_ = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
snake_case_ = '''\
WIKI_SPLIT is the combination of three metrics: SARI, exact match (EXACT) and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
snake_case_ = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def A__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
def remove_articles(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Tuple =re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(SCREAMING_SNAKE_CASE_ , ''' ''' , SCREAMING_SNAKE_CASE_ )
def white_space_fix(SCREAMING_SNAKE_CASE_ ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : int =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE_ ) ) ) )
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return int(normalize_answer(SCREAMING_SNAKE_CASE_ ) == normalize_answer(SCREAMING_SNAKE_CASE_ ) )
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] =[any(compute_exact(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
return (sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ )) * 1_0_0
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCamelCase : Any =[rgram for rgrams in rgramslist for rgram in rgrams]
lowerCamelCase : int =Counter(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict =Counter(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any =Counter()
for sgram, scount in sgramcounter.items():
lowerCamelCase : Tuple =scount * numref
lowerCamelCase : Optional[int] =Counter(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Tuple =Counter()
for cgram, ccount in cgramcounter.items():
lowerCamelCase : Tuple =ccount * numref
# KEEP
lowerCamelCase : str =sgramcounter_rep & cgramcounter_rep
lowerCamelCase : Union[str, Any] =keepgramcounter_rep & rgramcounter
lowerCamelCase : Optional[Any] =sgramcounter_rep & rgramcounter
lowerCamelCase : Optional[Any] =0
lowerCamelCase : List[Any] =0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase : Tuple =1
lowerCamelCase : int =1
if len(SCREAMING_SNAKE_CASE_ ) > 0:
lowerCamelCase : Tuple =keeptmpscorea / len(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
lowerCamelCase : Any =keeptmpscorea / sum(keepgramcounterall_rep.values() )
lowerCamelCase : Optional[Any] =0
if keepscore_precision > 0 or keepscore_recall > 0:
lowerCamelCase : Optional[int] =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
lowerCamelCase : int =sgramcounter_rep - cgramcounter_rep
lowerCamelCase : Dict =delgramcounter_rep - rgramcounter
lowerCamelCase : Dict =sgramcounter_rep - rgramcounter
lowerCamelCase : Optional[int] =0
lowerCamelCase : List[Any] =0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase : str =1
if len(SCREAMING_SNAKE_CASE_ ) > 0:
lowerCamelCase : Optional[int] =deltmpscorea / len(SCREAMING_SNAKE_CASE_ )
# ADDITION
lowerCamelCase : List[Any] =set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int =set(SCREAMING_SNAKE_CASE_ ) & set(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Optional[Any] =set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int =0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase : int =1
lowerCamelCase : List[Any] =1
if len(SCREAMING_SNAKE_CASE_ ) > 0:
lowerCamelCase : str =addtmpscore / len(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
lowerCamelCase : List[str] =addtmpscore / len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Optional[Any] =0
if addscore_precision > 0 or addscore_recall > 0:
lowerCamelCase : Optional[Any] =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
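# Hedged summary (not part of the original file) of the n-gram scorer above, following
# the SARI definition of Xu et al. (2016): with S, C and R the source, candidate and
# reference n-gram multisets for one n-gram order, it returns roughly
#   keep ~ F1 over S ∩ C (kept n-grams), judged against the references,
#   del  ~ precision over S - C (deleted n-grams), an n-gram counting as correct when it
#          is also absent from the references,
#   add  ~ F1 over C - S (added n-grams), judged against R - S,
# with 0/0 defined as 1 so that exact matches are not penalised, as the comments above note.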
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
lowerCamelCase : Optional[int] =len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict =ssent.split(''' ''' )
lowerCamelCase : Any =csent.split(''' ''' )
lowerCamelCase : str =[]
lowerCamelCase : Optional[Any] =[]
lowerCamelCase : List[Any] =[]
lowerCamelCase : List[str] =[]
lowerCamelCase : Tuple =[]
lowerCamelCase : Optional[Any] =[]
lowerCamelCase : int =[]
lowerCamelCase : List[str] =[]
lowerCamelCase : Dict =[]
lowerCamelCase : Any =[]
for rsent in rsents:
lowerCamelCase : Any =rsent.split(''' ''' )
lowerCamelCase : int =[]
lowerCamelCase : Optional[Any] =[]
lowerCamelCase : List[Any] =[]
ragramslist.append(SCREAMING_SNAKE_CASE_ )
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE_ ) - 1:
lowerCamelCase : Optional[int] =ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(SCREAMING_SNAKE_CASE_ )
if i < len(SCREAMING_SNAKE_CASE_ ) - 2:
lowerCamelCase : str =ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(SCREAMING_SNAKE_CASE_ )
if i < len(SCREAMING_SNAKE_CASE_ ) - 3:
lowerCamelCase : int =ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(SCREAMING_SNAKE_CASE_ )
ragramslist.append(SCREAMING_SNAKE_CASE_ )
ragramslist.append(SCREAMING_SNAKE_CASE_ )
ragramslist.append(SCREAMING_SNAKE_CASE_ )
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE_ ) - 1:
lowerCamelCase : Optional[int] =sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(SCREAMING_SNAKE_CASE_ )
if i < len(SCREAMING_SNAKE_CASE_ ) - 2:
lowerCamelCase : List[Any] =sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(SCREAMING_SNAKE_CASE_ )
if i < len(SCREAMING_SNAKE_CASE_ ) - 3:
lowerCamelCase : Optional[int] =sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(SCREAMING_SNAKE_CASE_ )
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE_ ) - 1:
lowerCamelCase : Optional[int] =cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(SCREAMING_SNAKE_CASE_ )
if i < len(SCREAMING_SNAKE_CASE_ ) - 2:
lowerCamelCase : List[str] =cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(SCREAMING_SNAKE_CASE_ )
if i < len(SCREAMING_SNAKE_CASE_ ) - 3:
lowerCamelCase : str =cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(SCREAMING_SNAKE_CASE_ )
((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : Any =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : Optional[Any] =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : List[Any] =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : List[str] =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] =sum([keepascore, keepascore, keepascore, keepascore] ) / 4
lowerCamelCase : List[str] =sum([delascore, delascore, delascore, delascore] ) / 4
lowerCamelCase : int =sum([addascore, addascore, addascore, addascore] ) / 4
lowerCamelCase : Any =(avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = "13a" , SCREAMING_SNAKE_CASE_ = True ) -> Any:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) so that sentences can be split
    # on spaces. Even though the Wiki-Auto and TURK datasets do not require
    # normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
lowerCamelCase : Union[str, Any] =sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
lowerCamelCase : List[Any] =sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE_ )()(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase : Any =sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE_ )
elif tokenizer == "moses":
lowerCamelCase : int =sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE_ , return_str=SCREAMING_SNAKE_CASE_ , escape=SCREAMING_SNAKE_CASE_ )
elif tokenizer == "penn":
lowerCamelCase : Any =sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE_ , return_str=SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase : Optional[int] =sentence
if not return_str:
lowerCamelCase : Union[str, Any] =normalized_sent.split()
return normalized_sent
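# Hedged example (not part of the original file) of the normalization helper above,
# called `normalize` in the upstream implementation; the exact token boundaries depend
# on the installed sacrebleu version, so treat the output as illustrative:
#
#   normalize("About 95 species are currently accepted .")
#   # -> "about 95 species are currently accepted ."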
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if not (len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
lowerCamelCase : Dict =0
for src, pred, refs in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE_ ) , normalize(SCREAMING_SNAKE_CASE_ ) , [normalize(SCREAMING_SNAKE_CASE_ ) for sent in refs] )
lowerCamelCase : str =sari_score / len(SCREAMING_SNAKE_CASE_ )
return 1_0_0 * sari_score
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="exp" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ) -> Dict:
lowerCamelCase : Optional[int] =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowerCamelCase : Optional[int] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE_ )]
lowerCamelCase : Union[str, Any] =sacrebleu.corpus_bleu(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , smooth_method=SCREAMING_SNAKE_CASE_ , smooth_value=SCREAMING_SNAKE_CASE_ , force=SCREAMING_SNAKE_CASE_ , lowercase=SCREAMING_SNAKE_CASE_ , use_effective_order=SCREAMING_SNAKE_CASE_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case_ ( datasets.Metric):
def __lowercase ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowercase ( self , __lowercase , __lowercase , __lowercase ) -> Tuple:
lowerCamelCase : str ={}
result.update({'''sari''': compute_sari(sources=__lowercase , predictions=__lowercase , references=__lowercase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__lowercase , references=__lowercase )} )
result.update({'''exact''': compute_em(predictions=__lowercase , references=__lowercase )} )
return result
| 262 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _A :
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int]=13 , __magic_name__ : List[str]=7 , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=False , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=99 , __magic_name__ : Optional[Any]=32 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[Any]=4 , __magic_name__ : str=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : str=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : List[str]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : Tuple=3 , __magic_name__ : List[str]=4 , __magic_name__ : Any=None , ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = parent
__snake_case : Any = batch_size
__snake_case : Optional[int] = seq_length
__snake_case : int = is_training
__snake_case : Union[str, Any] = use_input_mask
__snake_case : Any = use_token_type_ids
__snake_case : Optional[Any] = use_labels
__snake_case : str = vocab_size
__snake_case : Any = hidden_size
__snake_case : int = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : Any = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Optional[Any] = type_vocab_size
__snake_case : int = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : Optional[Any] = num_labels
__snake_case : str = num_choices
__snake_case : List[Any] = scope
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : List[str] = None
if self.use_input_mask:
__snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Dict = None
if self.use_token_type_ids:
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Optional[int] = None
__snake_case : str = None
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : int = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = LlamaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : int = model(__magic_name__ , attention_mask=__magic_name__ )
__snake_case : int = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[str] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Tuple , ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = True
__snake_case : Optional[int] = LlamaModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : str = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
__snake_case : List[Any] = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , )
__snake_case : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Any , ) -> str:
"""simple docstring"""
__snake_case : Dict = LlamaForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : str , ) -> Dict:
"""simple docstring"""
__snake_case : int = True
__snake_case : Optional[Any] = True
__snake_case : Union[str, Any] = LlamaForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# first forward pass
__snake_case : List[str] = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , use_cache=__magic_name__ , )
__snake_case : Any = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__snake_case : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask
__snake_case : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : Any = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : int = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0]
__snake_case : int = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0]
# select random slice
__snake_case : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : int = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : List[str] = config_and_inputs
__snake_case : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowercase__: int = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase__: Tuple = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase__: Any = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__: Any = False
lowercase__: List[Any] = False
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : int = LlamaModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Union[str, Any] = type
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = 3
__snake_case : Dict = input_dict["""input_ids"""]
__snake_case : List[Any] = input_ids.ne(1 ).to(__magic_name__ )
__snake_case : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : Union[str, Any] = LlamaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = 3
__snake_case : Optional[int] = """single_label_classification"""
__snake_case : int = input_dict["""input_ids"""]
__snake_case : Tuple = input_ids.ne(1 ).to(__magic_name__ )
__snake_case : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : str = LlamaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = 3
__snake_case : int = """multi_label_classification"""
__snake_case : str = input_dict["""input_ids"""]
__snake_case : Any = input_ids.ne(1 ).to(__magic_name__ )
__snake_case : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case : List[Any] = LlamaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowercase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = ids_tensor([1, 10] , config.vocab_size )
__snake_case : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Dict = LlamaModel(__magic_name__ )
original_model.to(__magic_name__ )
original_model.eval()
__snake_case : Optional[int] = original_model(__magic_name__ ).last_hidden_state
__snake_case : Optional[Any] = original_model(__magic_name__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Any = {"""type""": scaling_type, """factor""": 10.0}
__snake_case : Optional[Any] = LlamaModel(__magic_name__ )
scaled_model.to(__magic_name__ )
scaled_model.eval()
__snake_case : Union[str, Any] = scaled_model(__magic_name__ ).last_hidden_state
__snake_case : Union[str, Any] = scaled_model(__magic_name__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
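    # Hedged note (not part of the original test): the scaling dict built above matches the
    # `rope_scaling` field that recent LlamaConfig versions expose, e.g.
    #
    #   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
    #
    # with "dynamic" selecting NTK-aware dynamic scaling instead of a fixed linear factor.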
@require_torch
class _A ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__snake_case : Any = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
__snake_case : int = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__snake_case : List[str] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case : Union[str, Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __magic_name__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__snake_case : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
__snake_case : Tuple = model(torch.tensor(__magic_name__ ) )
# Expected mean on dim = -1
__snake_case : Union[str, Any] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case : Dict = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __magic_name__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__snake_case : Optional[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
__snake_case : int = model(torch.tensor(__magic_name__ ) )
# Expected mean on dim = -1
__snake_case : List[Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__snake_case : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
__snake_case : Optional[int] = model(torch.tensor(__magic_name__ ) )
__snake_case : Any = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 )
# fmt: off
__snake_case : Dict = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __magic_name__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
__snake_case : Union[str, Any] = """Simply put, the theory of relativity states that """
__snake_case : str = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
__snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , return_tensors="""pt""" )
__snake_case : Union[str, Any] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__magic_name__ )
# greedy generation outputs
__snake_case : List[str] = model.generate(__magic_name__ , max_new_tokens=64 , top_p=__magic_name__ , temperature=1 , do_sample=__magic_name__ )
__snake_case : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
| 26 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __A(lowerCAmelCase ) -> Dict:
"""simple docstring"""
_UpperCamelCase = os.path.join(args.tf_model_dir , """parameters.json""" )
_UpperCamelCase = json.loads(open(lowerCAmelCase ).read() )
if not params:
raise ValueError(
F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith(""".pt""" ):
_UpperCamelCase = args.output + """.pt"""
_UpperCamelCase = OrderedDict()
with tf.device("""/CPU:0""" ):
_UpperCamelCase = tf.train.load_checkpoint(args.tf_model_dir )
_UpperCamelCase = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_UpperCamelCase = reader.get_tensor(lowerCAmelCase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
_UpperCamelCase = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
_UpperCamelCase = 8
_UpperCamelCase = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/moe""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/softmlp/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
_UpperCamelCase = key_name[-9:-7]
for i in range(1_6 ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
_UpperCamelCase = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/mlp""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p1/bias""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p2/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p2/bias""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/ln""" ):
_UpperCamelCase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.norm.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/g""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.norm.weight""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/att""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
_UpperCamelCase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_UpperCamelCase = state[:, 0, :, :]
_UpperCamelCase = state[:, 1, :, :]
_UpperCamelCase = state[:, 2, :, :]
_UpperCamelCase = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/o/kernel""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
_UpperCamelCase = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/an""" ):
_UpperCamelCase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.norm.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/g""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.norm.weight""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
_UpperCamelCase = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
_UpperCamelCase = """model.%s.weight""" % nlayer
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = torch.tensor(lowerCAmelCase )
if key_name.startswith("""model/wte""" ):
_UpperCamelCase = """lm_head.weight"""
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/wob""" ):
_UpperCamelCase = """final_logits_bias"""
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = state.reshape((1, -1) )
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense/kernel":
_UpperCamelCase = """model.last_project.weight"""
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense_1/bias":
_UpperCamelCase = """model.last_project.bias"""
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
torch.save(lowerCAmelCase , args.output )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
lowerCamelCase__ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
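# Hedged usage sketch (paths below are placeholders, not taken from the original repository):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan_pytorch
#
# Because the script appends ".pt" when the suffix is missing, the command above writes
# gptsan_pytorch.pt.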
| 612 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[str] ={"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int =[
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_lowercase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
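# Hedged note (not part of the original file): with the _LazyModule indirection above,
# importing the package stays cheap and torch is only pulled in once one of the listed
# symbols is actually accessed, e.g.
#
#   from transformers import SEWConfig, SEWModel  # assumed public re-exports
#   model = SEWModel(SEWConfig())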
| 412 |
import operator as op
_lowercase : Optional[int] ="""scaler.pt"""
_lowercase : List[Any] ="""pytorch_model"""
_lowercase : Tuple ="""random_states"""
_lowercase : Tuple ="""optimizer"""
_lowercase : Dict ="""scheduler"""
_lowercase : List[str] ="""pytorch_model.bin"""
_lowercase : Optional[int] ="""pytorch_model.bin.index.json"""
_lowercase : List[Any] ="""model.safetensors"""
_lowercase : Union[str, Any] ="""model.safetensors.index.json"""
_lowercase : str ="""1.10.2"""
_lowercase : Optional[int] ="""py38"""
_lowercase : int ="""4.17.0"""
_lowercase : str =["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
_lowercase : int =["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
_lowercase : str =["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
_lowercase : int =["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
_lowercase : Union[str, Any] =["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
_lowercase : str ="""2.0.1"""
_lowercase : Tuple =["""pdsh""", """standard""", """openmpi""", """mvapich"""]
_lowercase : List[Any] =["""default""", """reduce-overhead""", """max-autotune"""]
_lowercase : Union[str, Any] ={""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_lowercase : Optional[int] =[
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
_lowercase : int =["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
_lowercase : Optional[Any] =["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 412 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
A__ : Optional[Any] =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self : Dict , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : int = 50 , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCamelCase__ ):
A__ : int =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
A__ : List[str] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
A__ : Dict =randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A__ : Tuple =self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A__ : Any =self.scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , eta=UpperCamelCase__ , use_clipped_model_output=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
A__ : Optional[int] =(image / 2 + 0.5).clamp(0 , 1 )
A__ : Tuple =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ : Any =self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
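# Hedged usage sketch: assuming the class above corresponds to diffusers' unconditional
# DDIM image pipeline, a typical call looks like (the model id is a placeholder):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]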
| 656 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase ( UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A__ : List[Any] =tf.convert_to_tensor(UpperCamelCase )
A__ : List[Any] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : Optional[Any] =tf.convert_to_tensor(UpperCamelCase )
A__ : Tuple =tf.cast(math.pi , x.dtype )
A__ : Dict =tf.cast(0.04_47_15 , x.dtype )
A__ : Optional[int] =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase , 3 )) ))
return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : List[str] =tf.convert_to_tensor(UpperCamelCase )
return x * tf.tanh(tf.math.softplus(UpperCamelCase ) )
def lowercase ( UpperCamelCase : List[str] ):
"""simple docstring"""
A__ : Union[str, Any] =tf.convert_to_tensor(UpperCamelCase )
A__ : List[Any] =tf.cast(0.04_47_15 , x.dtype )
A__ : List[Any] =tf.cast(0.79_78_84_56_08 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
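# Hedged note (not part of the original file): the two approximations above implement the
# tanh form of GELU,
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))
# where 0.7978845608 is sqrt(2 / pi), so both functions compute the same quantity up to
# how that constant is folded in.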
def lowercase ( UpperCamelCase : List[Any] ):
"""simple docstring"""
A__ : List[str] =tf.convert_to_tensor(UpperCamelCase )
A__ : str =tf.cast(1.7_02 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 )
def lowercase ( UpperCamelCase : str , UpperCamelCase : Any=-1 ):
"""simple docstring"""
A__ , A__ : Optional[Any] =tf.split(UpperCamelCase , 2 , axis=UpperCamelCase )
return a * tf.math.sigmoid(UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase )
__A : Optional[Any] = tf.keras.activations.gelu
__A : Optional[Any] = approximate_gelu_wrap
else:
__A : Any = _gelu
__A : Union[str, Any] = _gelu_new
__A : List[str] = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowercase ( UpperCamelCase : List[Any] ):
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 656 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
lowerCAmelCase = """M-CLIP"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=1_0_2_4 , UpperCamelCase : Union[str, Any]=7_6_8 , **UpperCamelCase : Any )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = transformerDimSize
__SCREAMING_SNAKE_CASE : int = imageDimSize
super().__init__(**UpperCamelCase )
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
lowerCAmelCase = MCLIPConfig
def __init__( self : Optional[int] , UpperCamelCase : List[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[Any] )->Union[str, Any]:
super().__init__(UpperCamelCase , *UpperCamelCase , **UpperCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def __snake_case ( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] )->int:
__SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(UpperCamelCase ), embs
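# Hedged note (not part of the original file): the forward pass above is a masked mean
# pool over the token dimension followed by a linear projection, i.e. per sequence
#   pooled = sum_t(h_t * m_t) / sum_t(m_t),   out = LinearTransformation(pooled)
# which maps the XLM-R sentence embedding into the dimensionality of the paired CLIP
# image encoder.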
| 447 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
lowerCAmelCase = ["""vqvae"""]
def __init__( self : Tuple , UpperCamelCase : AutoencoderKL , UpperCamelCase : UNetaDConditionModel , UpperCamelCase : Mel , UpperCamelCase : Union[DDIMScheduler, DDPMScheduler] , )->Tuple:
super().__init__()
self.register_modules(unet=UpperCamelCase , scheduler=UpperCamelCase , mel=UpperCamelCase , vqvae=UpperCamelCase )
def __snake_case ( self : List[Any] )->int:
return 5_0 if isinstance(self.scheduler , UpperCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self : Dict , UpperCamelCase : int = 1 , UpperCamelCase : str = None , UpperCamelCase : np.ndarray = None , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = None , UpperCamelCase : torch.Generator = None , UpperCamelCase : float = 0 , UpperCamelCase : float = 0 , UpperCamelCase : torch.Generator = None , UpperCamelCase : float = 0 , UpperCamelCase : torch.Tensor = None , UpperCamelCase : torch.Tensor = None , UpperCamelCase : Any=True , )->Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
__SCREAMING_SNAKE_CASE : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__SCREAMING_SNAKE_CASE : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__SCREAMING_SNAKE_CASE : Any = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCamelCase , device=self.device , )
__SCREAMING_SNAKE_CASE : Any = noise
__SCREAMING_SNAKE_CASE : Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCamelCase , UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = self.mel.audio_slice_to_image(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (input_image / 2_5_5) * 2 - 1
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vqvae.encode(torch.unsqueeze(UpperCamelCase , 0 ) ).latent_dist.sample(
generator=UpperCamelCase )[0]
__SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__SCREAMING_SNAKE_CASE : List[str] = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , self.scheduler.timesteps[start_step - 1] )
__SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(mask_start_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE : int = int(mask_end_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE : Any = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCamelCase ):
__SCREAMING_SNAKE_CASE : str = self.unet(UpperCamelCase , UpperCamelCase , UpperCamelCase )["sample"]
else:
__SCREAMING_SNAKE_CASE : int = self.unet(UpperCamelCase , UpperCamelCase )["sample"]
if isinstance(self.scheduler , UpperCamelCase ):
__SCREAMING_SNAKE_CASE : int = self.scheduler.step(
model_output=UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , )["prev_sample"]
else:
__SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step(
model_output=UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , generator=UpperCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
__SCREAMING_SNAKE_CASE : int = mask[:, step, :, :mask_start]
if mask_end > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
__SCREAMING_SNAKE_CASE : Any = 1 / self.vqvae.config.scaling_factor * images
__SCREAMING_SNAKE_CASE : Any = self.vqvae.decode(UpperCamelCase )["sample"]
__SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
__SCREAMING_SNAKE_CASE : str = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__SCREAMING_SNAKE_CASE : Tuple = (images * 2_5_5).round().astype("uint8" )
__SCREAMING_SNAKE_CASE : Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCamelCase , mode="RGB" ).convert("L" ) for _ in images) )
__SCREAMING_SNAKE_CASE : List[str] = [self.mel.image_to_audio(UpperCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCamelCase ) )
@torch.no_grad()
def __snake_case ( self : Dict , UpperCamelCase : List[Image.Image] , UpperCamelCase : int = 5_0 )->np.ndarray:
assert isinstance(self.scheduler , UpperCamelCase )
self.scheduler.set_timesteps(UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
__SCREAMING_SNAKE_CASE : Dict = (sample / 2_5_5) * 2 - 1
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Tensor(UpperCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.alphas_cumprod[t]
__SCREAMING_SNAKE_CASE : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : Dict = 1 - alpha_prod_t
__SCREAMING_SNAKE_CASE : List[Any] = self.unet(UpperCamelCase , UpperCamelCase )["sample"]
__SCREAMING_SNAKE_CASE : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__SCREAMING_SNAKE_CASE : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def __snake_case ( xa : torch.Tensor , xb : torch.Tensor , alpha : float )->torch.Tensor:
        # Spherical linear interpolation between two noise tensors.
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
| 447 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
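# Illustrative migration note (added; a hedged sketch, not part of the original shim): after the
# deprecation above, downstream code is expected to use the top-level import instead of the old
# module path, e.g.
#
#   from diffusers import StableDiffusionControlNetPipeline
#   pipe = StableDiffusionControlNetPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", controlnet=controlnet)
#
# The checkpoint name and the `controlnet` object here are assumptions used only for illustration.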
| 589 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Any = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
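# Minimal sketch of the lazy-import idea used above (an illustrative assumption, not the actual
# `_LazyModule` implementation): attribute access resolves the submodule on demand, so importing
# the package stays cheap until a symbol such as `InformerConfig` is first touched.
#
#   import importlib, types
#
#   class _TinyLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, symbols in self._import_structure.items():
#               if attr in symbols:
#                   return getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
#           raise AttributeError(attr)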
| 589 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase__ ( a , a , a , a , a ):
# Load configuration defined in the metadata file
with open(a ) as metadata_file:
__snake_case = json.load(a )
__snake_case = LukeConfig(use_entity_aware_attention=a , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__snake_case = torch.load(a , map_location='cpu' )['module']
# Load the entity vocab file
__snake_case = load_original_entity_vocab(a )
# add an entry for [MASK2]
__snake_case = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__snake_case = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case = AddedToken('<ent>' , lstrip=a , rstrip=a )
__snake_case = AddedToken('<ent2>' , lstrip=a , rstrip=a )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(a )
with open(os.path.join(a , 'tokenizer_config.json' ) , 'r' ) as f:
__snake_case = json.load(a )
__snake_case = 'MLukeTokenizer'
with open(os.path.join(a , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(a , a )
with open(os.path.join(a , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(a , a )
__snake_case = MLukeTokenizer.from_pretrained(a )
# Initialize the embeddings of the special tokens
__snake_case = tokenizer.convert_tokens_to_ids(['@'] )[0]
__snake_case = tokenizer.convert_tokens_to_ids(['#'] )[0]
__snake_case = state_dict['embeddings.word_embeddings.weight']
__snake_case = word_emb[ent_init_index].unsqueeze(0 )
__snake_case = word_emb[enta_init_index].unsqueeze(0 )
__snake_case = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case = state_dict[bias_name]
__snake_case = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case = f'encoder.layer.{layer_index}.attention.self.'
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case = state_dict['entity_embeddings.entity_embeddings.weight']
__snake_case = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
__snake_case = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case = state_dict['entity_predictions.bias']
__snake_case = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
__snake_case = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case = LukeForMaskedLM(config=a ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
__snake_case = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
__snake_case = state_dict[key]
else:
__snake_case = state_dict[key]
__snake_case , __snake_case = model.load_state_dict(a , strict=a )
if set(a ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case = MLukeTokenizer.from_pretrained(a , task='entity_classification' )
__snake_case = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
__snake_case = (0, 9)
__snake_case = tokenizer(a , entity_spans=[span] , return_tensors='pt' )
__snake_case = model(**a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case = torch.Size((1, 33, 768) )
__snake_case = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case = torch.Size((1, 1, 768) )
__snake_case = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case = MLukeTokenizer.from_pretrained(a )
__snake_case = 'Tokyo is the capital of <mask>.'
__snake_case = (24, 30)
__snake_case = tokenizer(a , entity_spans=[span] , return_tensors='pt' )
__snake_case = model(**a )
__snake_case = encoding['input_ids'][0].tolist()
__snake_case = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
__snake_case = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(a )
__snake_case = outputs.entity_logits[0][0].argmax().item()
__snake_case = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(a ) )
model.save_pretrained(a )
def lowerCamelCase__ ( a ):
__snake_case = ['[MASK]', '[PAD]', '[UNK]']
__snake_case = [json.loads(a ) for line in open(a )]
__snake_case = {}
for entry in data:
__snake_case = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__snake_case = entity_id
break
__snake_case = f'{language}:{entity_name}'
__snake_case = entity_id
return new_mapping
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_lowercase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
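# Hedged usage sketch (added; the script filename and the paths are assumptions, the flags come
# from the argparse definitions above):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base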
| 427 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a_ ( UpperCAmelCase__ ):
lowercase_ : Dict = '''gpt_neo'''
lowercase_ : Tuple = ['''past_key_values''']
lowercase_ : List[str] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : List[str] , __lowerCAmelCase : Optional[int]=5_0_2_5_7 , __lowerCAmelCase : Tuple=2_0_4_8 , __lowerCAmelCase : str=2_0_4_8 , __lowerCAmelCase : Optional[Any]=2_4 , __lowerCAmelCase : Optional[Any]=[[["global", "local"], 1_2]] , __lowerCAmelCase : Optional[Any]=1_6 , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Any=2_5_6 , __lowerCAmelCase : str="gelu_new" , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : int=True , __lowerCAmelCase : Tuple=5_0_2_5_6 , __lowerCAmelCase : Any=5_0_2_5_6 , **__lowerCAmelCase : Optional[int] , ):
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = hidden_size
__snake_case = num_layers
__snake_case = num_heads
__snake_case = intermediate_size
__snake_case = window_size
__snake_case = activation_function
__snake_case = resid_dropout
__snake_case = embed_dropout
__snake_case = attention_dropout
__snake_case = classifier_dropout
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = use_cache
__snake_case = bos_token_id
__snake_case = eos_token_id
__snake_case = attention_types
__snake_case = self.expand_attention_types_params(__lowerCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@staticmethod
def lowercase__ ( __lowerCAmelCase : Optional[Any] ):
__snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCamelCase__ ( a , a , a , a ):
import torch
__snake_case = input.size()
__snake_case = len(a )
__snake_case = shape[dimension]
__snake_case = torch.arange(0 , a , a )
__snake_case = torch.div(sizedim - size , a , rounding_mode='floor' ) + 1
__snake_case = torch.arange(a ) + low_indices[:min_length][:, None]
__snake_case = [slice(a )] * rank
__snake_case = indices
__snake_case = input[s]
__snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(a )
def lowerCamelCase__ ( a , a ):
import torch
__snake_case = torch.arange(1 , a )
__snake_case = torch.remainder(a , a )
__snake_case = remainders == 0
__snake_case = candidates[divisor_indices]
__snake_case = torch.max(a )
return largest_divisor, torch.div(a , a , rounding_mode='floor' )
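# Illustrative note (an assumption added for clarity, not part of the original file): the first
# helper above is meant to mirror `torch.Tensor.unfold` using traceable ops for ONNX export; for
# t = torch.arange(6) with dimension=0, size=2, step=2 the reference behaviour is
# t.unfold(0, 2, 2) -> [[0, 1], [2, 3], [4, 5]]. The second helper appears to pick the largest
# divisor of the sequence length (up to the window size) and the corresponding number of blocks.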
class a_ ( UpperCAmelCase__ ):
@property
def lowercase__ ( self : Optional[Any] ):
__snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase , direction='inputs' )
__snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowercase__ ( self : Tuple ):
return self._config.num_heads
def lowercase__ ( self : int , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ):
__snake_case = super(__lowerCAmelCase , self ).generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
__snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__snake_case , __snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case = [
(torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(self.num_layers )
]
__snake_case = common_inputs['attention_mask']
if self.use_past:
__snake_case = ordered_inputs['attention_mask'].dtype
__snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def lowercase__ ( self : str ):
return 1_3
| 427 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , __lowercase : List[str] , __lowercase : Optional[Any]=100 , __lowercase : Tuple=13 , __lowercase : Optional[int]=30 , __lowercase : Union[str, Any]=2 , __lowercase : Optional[int]=3 , __lowercase : List[str]=True , __lowercase : Tuple=True , __lowercase : Tuple=32 , __lowercase : Any=5 , __lowercase : Optional[int]=4 , __lowercase : List[str]=37 , __lowercase : Dict="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=10 , __lowercase : Tuple=0.0_2 , __lowercase : Any=3 , ):
'''simple docstring'''
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : Optional[Any] = patch_size
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Any = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
__UpperCAmelCase : Optional[int] = num_patches + 1
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Any = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def A_ ( self : Tuple , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = FlaxBeitModel(config=__lowercase )
__UpperCAmelCase : Any = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : List[Any] , __lowercase : Any , __lowercase : List[Any] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = FlaxBeitForMaskedImageModeling(config=__lowercase )
__UpperCAmelCase : Dict = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def A_ ( self : Optional[int] , __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.type_sequence_label_size
__UpperCAmelCase : Tuple = FlaxBeitForImageClassification(config=__lowercase )
__UpperCAmelCase : int = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : Any = 1
__UpperCAmelCase : str = FlaxBeitForImageClassification(__lowercase )
__UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Optional[int] = model(__lowercase )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class snake_case ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = FlaxBeitModelTester(self )
__UpperCAmelCase : Dict = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def A_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(__lowercase )
__UpperCAmelCase : List[str] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Tuple = [*signature.parameters.keys()]
__UpperCAmelCase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : Tuple = self._prepare_for_class(__lowercase , __lowercase )
__UpperCAmelCase : int = model_class(__lowercase )
@jax.jit
def model_jitted(__lowercase : int , **__lowercase : Union[str, Any] ):
return model(pixel_values=__lowercase , **__lowercase )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase : Optional[Any] = model_jitted(**__lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase : Optional[Any] = model_jitted(**__lowercase ).to_tuple()
self.assertEqual(len(__lowercase ) , len(__lowercase ) )
for jitted_output, output in zip(__lowercase , __lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def A_ ( self : int ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
__UpperCAmelCase : List[str] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__lowercase )
def lowerCamelCase_ ( ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A_ ( self : Optional[int] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def A_ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : List[Any] = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=__lowercase , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
__UpperCAmelCase : List[Any] = np.ones((1, 196) , dtype=__lowercase )
# forward pass
__UpperCAmelCase : Any = model(pixel_values=__lowercase , bool_masked_pos=__lowercase )
__UpperCAmelCase : int = outputs.logits
# verify the logits
__UpperCAmelCase : Optional[int] = (1, 196, 8_192)
self.assertEqual(logits.shape , __lowercase )
__UpperCAmelCase : Union[str, Any] = np.array(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __lowercase , atol=1e-2 ) )
@slow
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
__UpperCAmelCase : List[Any] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : str = image_processor(images=__lowercase , return_tensors='''np''' )
# forward pass
__UpperCAmelCase : str = model(**__lowercase )
__UpperCAmelCase : List[Any] = outputs.logits
# verify the logits
__UpperCAmelCase : int = (1, 1_000)
self.assertEqual(logits.shape , __lowercase )
__UpperCAmelCase : Dict = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] )
self.assertTrue(np.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) )
__UpperCAmelCase : Optional[Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
@slow
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
__UpperCAmelCase : str = self.default_image_processor
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : str = image_processor(images=__lowercase , return_tensors='''np''' )
# forward pass
__UpperCAmelCase : List[str] = model(**__lowercase )
__UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
__UpperCAmelCase : int = (1, 21_841)
self.assertEqual(logits.shape , __lowercase )
__UpperCAmelCase : Dict = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] )
self.assertTrue(np.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) )
__UpperCAmelCase : List[Any] = 2_396
        self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
| 522 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''distilbert'''
UpperCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) -> Dict:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def snake_case_( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
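# Hedged usage sketch (added for illustration; assuming the classes above keep their upstream
# names DistilBertConfig / DistilBertOnnxConfig, which is not shown in this file): the mapping
# returned by `inputs` tells the ONNX exporter which axes are dynamic, e.g. batch on axis 0 and
# sequence on axis 1 for the default task.
#
#   onnx_config = DistilBertOnnxConfig(DistilBertConfig())
#   assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "sequence"}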
| 314 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _A ( _snake_case ):
'''simple docstring'''
_snake_case : torch.FloatTensor
class _A ( _snake_case , _snake_case ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[int] , lowerCamelCase : int = 16 , lowerCamelCase : int = 88 , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : int = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : int = 32 , lowerCamelCase : Optional[int] = None , lowerCamelCase : bool = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : str = "geglu" , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ):
'''simple docstring'''
super().__init__()
__lowercase = num_attention_heads
__lowercase = attention_head_dim
__lowercase = num_attention_heads * attention_head_dim
__lowercase = in_channels
__lowercase = torch.nn.GroupNorm(num_groups=lowerCamelCase , num_channels=lowerCamelCase , eps=1e-6 , affine=lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
# 3. Define transformers blocks
__lowercase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase , lowerCamelCase , lowerCamelCase , dropout=lowerCamelCase , cross_attention_dim=lowerCamelCase , activation_fn=lowerCamelCase , attention_bias=lowerCamelCase , double_self_attention=lowerCamelCase , norm_elementwise_affine=lowerCamelCase , )
for d in range(lowerCamelCase )
] )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]=None , lowerCamelCase : int=None , lowerCamelCase : Dict=None , lowerCamelCase : str=1 , lowerCamelCase : int=None , lowerCamelCase : bool = True , ):
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = hidden_states.shape
__lowercase = batch_frames // num_frames
__lowercase = hidden_states
__lowercase = hidden_states[None, :].reshape(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowercase = self.norm(lowerCamelCase )
__lowercase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowerCamelCase , lowerCamelCase )
__lowercase = self.proj_in(lowerCamelCase )
# 2. Blocks
for block in self.transformer_blocks:
__lowercase = block(
lowerCamelCase , encoder_hidden_states=lowerCamelCase , timestep=lowerCamelCase , cross_attention_kwargs=lowerCamelCase , class_labels=lowerCamelCase , )
# 3. Output
__lowercase = self.proj_out(lowerCamelCase )
__lowercase = (
hidden_states[None, None, :]
.reshape(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowercase = hidden_states.reshape(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=lowerCamelCase )
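# Shape-handling note (added for clarity; dimension names are assumptions): the forward pass above
# folds a (batch * frames, channels, height, width) input into one sequence of length `num_frames`
# per spatial location, runs the transformer blocks over that temporal axis, then restores the
# original layout and adds the residual before wrapping the result in TransformerTemporalModelOutput.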
| 713 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
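# Note (added for clarity; a description of the two tests above, not new behaviour): both tests
# save the pipeline with `save_pretrained`, reload it via `from_pretrained`, rerun generation with
# identical inputs, and assert that the maximum absolute difference between outputs stays below
# 1e-4, which guards the serialization round trip of every pipeline component.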
| 655 | 0 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = tmp_path / """cache"""
UpperCAmelCase__ : List[Any] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase__ : Union[str, Any] = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Dict = tmp_path / """cache"""
UpperCAmelCase__ : Tuple = {"""text""": """string"""}
UpperCAmelCase__ : Dict = features.copy() if features else default_expected_features
UpperCAmelCase__ : Tuple = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase__ : Optional[Any] = TextDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = tmp_path / """cache"""
UpperCAmelCase__ : str = {"""text""": """string"""}
UpperCAmelCase__ : Dict = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if issubclass(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Union[str, Any] = text_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Tuple = [text_path]
UpperCAmelCase__ : Dict = tmp_path / """cache"""
UpperCAmelCase__ : Optional[Any] = {"""text""": """string"""}
UpperCAmelCase__ : Union[str, Any] = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ):
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
UpperCAmelCase__ : Any = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Any = tmp_path / """cache"""
UpperCAmelCase__ : Tuple = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase__ : Dict = TextDatasetReader({"""train""": text_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = tmp_path / """cache"""
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
UpperCAmelCase__ : Optional[int] = {"""text""": """string"""}
UpperCAmelCase__ : Optional[Any] = features.copy() if features else default_expected_features
UpperCAmelCase__ : str = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase__ : Tuple = TextDatasetReader({"""train""": text_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if split:
UpperCAmelCase__ : Tuple = {split: text_path}
else:
UpperCAmelCase__ : Optional[int] = """train"""
UpperCAmelCase__ : Optional[Any] = {"""train""": text_path, """test""": text_path}
UpperCAmelCase__ : List[str] = tmp_path / """cache"""
UpperCAmelCase__ : Optional[Any] = {"""text""": """string"""}
UpperCAmelCase__ : str = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 65 |
import torch
from diffusers import StableDiffusionPipeline
_snake_case = "path-to-your-trained-model"
_snake_case = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
_snake_case = "A photo of sks dog in a bucket"
_snake_case = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 307 | 0 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
A : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
A : List[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """simple docstring"""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
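# Illustrative sanity check (added; the concrete value is an assumption, not from the original
# file): 585 is palindromic in base 10 and its binary form 1001001001 is also palindromic, so
# solution() must include it in the sum.
if __name__ == "__main__":
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])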
| 215 |
'''simple docstring'''
import argparse
import os
import re
__snake_case : Dict = 'src/diffusers'
# Pattern that looks at the indentation in a line.
__snake_case : Optional[Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
__snake_case : Tuple = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__snake_case : Dict = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
__snake_case : Union[str, Any] = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__snake_case : Any = re.compile(r'\[([^\]]+)\]')
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Any:
"""simple docstring"""
A__ : Optional[int] =_re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def __lowerCamelCase ( __snake_case : str, __snake_case : Union[str, Any]="", __snake_case : Tuple=None, __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
A__ : str =0
A__ : List[Any] =code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
A__ : Union[str, Any] =["""\n""".join(lines[:index] )]
else:
A__ : Tuple =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
A__ : int =[lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(__snake_case ) )
if index < len(__snake_case ) - 1:
A__ : Any =[lines[index + 1]]
index += 1
else:
A__ : List[str] =[]
else:
blocks.append("""\n""".join(__snake_case ) )
A__ : Any =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append("""\n""".join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def __lowerCamelCase ( __snake_case : Dict ) -> Dict:
"""simple docstring"""
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace("""_""", """""" )
return _inner
def __lowerCamelCase ( __snake_case : List[str], __snake_case : Union[str, Any]=None ) -> List[Any]:
"""simple docstring"""
def noop(__snake_case : int ):
return x
if key is None:
A__ : Optional[int] =noop
# Constants are all uppercase, they go first.
A__ : Tuple =[obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
A__ : List[str] =[obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
A__ : Union[str, Any] =[obj for obj in objects if not key(__snake_case )[0].isupper()]
A__ : Union[str, Any] =ignore_underscore(__snake_case )
return sorted(__snake_case, key=__snake_case ) + sorted(__snake_case, key=__snake_case ) + sorted(__snake_case, key=__snake_case )
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
def _replace(__snake_case : Any ):
A__ : str =match.groups()[0]
if "," not in imports:
return f"[{imports}]"
A__ : Tuple =[part.strip().replace("""\"""", """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
A__ : int =keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(__snake_case )] ) + "]"
A__ : int =import_statement.split("""\n""" )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
A__ : Optional[int] =2 if lines[1].strip() == """[""" else 1
A__ : Optional[int] =[(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
A__ : List[str] =sort_objects(__snake_case, key=lambda __snake_case : x[1] )
A__ : Tuple =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
A__ : List[Any] =_re_bracket_content.sub(_replace, lines[1] )
else:
A__ : List[str] =[part.strip().replace("""\"""", """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
A__ : List[Any] =keys[:-1]
A__ : List[Any] =get_indent(lines[1] ) + """, """.join([f"\"{k}\"" for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
A__ : Union[str, Any] =_re_bracket_content.sub(_replace, __snake_case )
return import_statement
def __lowerCamelCase ( __snake_case : List[str], __snake_case : str=True ) -> Optional[int]:
"""simple docstring"""
with open(__snake_case, """r""" ) as f:
A__ : str =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
A__ : Any =split_code_in_indented_blocks(
__snake_case, start_prompt="""_import_structure = {""", end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1, len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
A__ : Optional[Any] =main_blocks[block_idx]
A__ : Optional[Any] =block.split("""\n""" )
# Get to the start of the imports.
A__ : Optional[Any] =0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
A__ : Dict =len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
A__ : str ="""\n""".join(block_lines[line_idx:-1] )
A__ : Dict =get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
A__ : Dict =split_code_in_indented_blocks(__snake_case, indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
A__ : int =_re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
A__ : int =[(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
A__ : str =[(i, key) for i, key in enumerate(__snake_case ) if key is not None]
A__ : Optional[int] =[x[0] for x in sorted(__snake_case, key=lambda __snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
A__ : Optional[Any] =0
A__ : int =[]
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
A__ : Union[str, Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
A__ : Any ="""\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
            print(f"Overwriting {__snake_case}." )
with open(__snake_case, """w""" ) as f:
f.write("""\n""".join(__snake_case ) )
def sort_imports_in_all_inits ( check_only : bool=True ) -> Any:
    """simple docstring"""
    failures =[]
    # Walk the source tree; the path below assumes the layout of the upstream repository.
    for root, _, files in os.walk("""src/transformers""" ):
        if "__init__.py" in files:
            result =sort_imports(os.path.join(root, """__init__.py""" ), check_only=check_only )
            if result:
                failures.append(os.path.join(root, """__init__.py""" ) )
    if len(failures ) > 0:
        raise ValueError(f"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__snake_case : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
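# Illustrative sketch (added for clarity, not part of the original script): a hypothetical
# `_import_structure` block of the kind this tool normalizes in an `__init__.py`:
#
#     _import_structure = {
#         "tokenization_foo": ["FooTokenizer"],
#         "configuration_foo": ["FooConfig"],
#     }
#
# Running the script without --check_only rewrites the block so that the dictionary keys and
# the object lists inside each value follow the canonical order computed by `sort_objects`,
# keeping diffs stable across the repository's `__init__.py` files.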
| 215 | 1 |
'''simple docstring'''
def __UpperCamelCase ( _UpperCAmelCase ):
    if not isinstance(_UpperCAmelCase, int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_string = str(abs(_UpperCAmelCase ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
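# Worked example (added for illustration; relies on the corrected function above, which drops
# one digit at every position and keeps the largest remaining number):
#   132  -> candidates 32, 12, 13 -> 32
#   -156 -> abs() is taken first, candidates 56, 16, 15 -> 56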
| 329 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ):
"""simple docstring"""
pass
def __UpperCamelCase ( _UpperCAmelCase ):
    __UpperCAmelCase = hashlib.md5(_UpperCAmelCase.tobytes() )
    return __UpperCAmelCase.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : int = DepthEstimationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Tuple = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , UpperCAmelCase_ )
import datasets
__UpperCAmelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
__UpperCAmelCase : Dict = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , UpperCAmelCase_ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "Intel/dpt-large"
__UpperCAmelCase : Optional[int] = pipeline("depth-estimation" , model=UpperCAmelCase_ )
__UpperCAmelCase : Any = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__UpperCAmelCase : str = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 329 | 1 |
__snake_case :Optional[Any] ={
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path ( graph : dict , start : str , goal : str ) -> list[str]:
    '''simple docstring'''
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance ( graph : dict , start : str , target : str ) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
 | 106 |
'''simple docstring'''
def nor_gate ( input_a , input_b ) -> int:
    """simple docstring"""
    return int(input_a == input_b == 0 )
def main ( ) -> None:
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def UpperCAmelCase ( A__: Optional[int] , A__: Optional[Any]=() , A__: List[str]=None , A__: Any="no" , A__: Any="29500" ) -> List[Any]:
__lowerCamelCase : Any = False
__lowerCamelCase : Any = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
__lowerCamelCase : Optional[Any] = True
elif "IPython" in sys.modules:
__lowerCamelCase : Tuple = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
__lowerCamelCase : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , A__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
__lowerCamelCase : Any = 8
__lowerCamelCase : List[Any] = PrepareForLaunch(A__ , distributed_type='TPU' )
print(f'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(A__ , args=A__ , nprocs=A__ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*A__ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=A__ , master_addr='127.0.01' , master_port=A__ , mixed_precision=A__ ):
__lowerCamelCase : Dict = PrepareForLaunch(A__ , distributed_type='MULTI_GPU' )
print(f'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(A__ , args=A__ , nprocs=A__ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__lowerCamelCase : List[str] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*A__ )
def UpperCAmelCase ( A__: Dict , A__: Dict=() , A__: List[Any]=2 ) -> str:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=A__ , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
__lowerCamelCase : List[Any] = PrepareForLaunch(A__ , debug=A__ )
start_processes(A__ , args=A__ , nprocs=A__ , start_method='fork' )
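# Usage sketch (an assumption for illustration: upstream this pair of helpers corresponds to
# Accelerate's `notebook_launcher` / `debug_launcher`; the training function below is hypothetical):
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build the Accelerator, model and dataloaders, then run the loop
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)
#
# On a Colab/Kaggle TPU runtime the first branch spawns 8 XLA processes through `xmp.spawn`,
# on a multi-GPU machine it forks `num_processes` workers with `start_processes`, and
# otherwise the function is simply executed in the current process.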
| 706 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime ( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
        # Find finish time of current process
        finish_time = increment_time + 1
        # Calculate waiting time
        finar = finish_time - arrival_time[short]
        waiting_time[short] = finar - burst_time[short]
        if waiting_time[short] < 0:
            waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime ( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times ( waiting_time: list[int] , turn_around_time: list[int] , no_of_processes: int ) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
    print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
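# Hand-worked sanity check (illustrative addition): with arrival_time = [0, 1] and
# burst_time = [3, 2], preemptive SJF keeps P1 on the CPU for t = 0..3 (its remaining time
# never exceeds P2's) and then runs P2 for t = 3..5, so
#   waiting times     : P1 = 3 - 0 - 3 = 0, P2 = 5 - 1 - 2 = 2  -> average 1.0
#   turn around times : P1 = 3 + 0 = 3,     P2 = 2 + 2 = 4      -> average 3.5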
| 263 | 0 |
'''simple docstring'''
def triangle_number_generator ( ) -> int:
    '''simple docstring'''
    for n in range(1 , 1_000_000 ):
        yield n * (n + 1) // 2
def count_divisors ( n ) -> int:
    '''simple docstring'''
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution ( ) -> int:
    '''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
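# Worked example of the divisor-counting step (illustrative addition): 28 = 2^2 * 7, so the
# number of divisors is (2 + 1) * (1 + 1) = 6, i.e. {1, 2, 4, 7, 14, 28}. The trailing
# `divisors_count *= 2` accounts for a single prime factor left over once the loop has passed
# sqrt(n) -- here the factor 7 after the powers of 2 have been stripped out.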
| 422 |
"""simple docstring"""
lowercase__ = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment out the command above and uncomment the one below.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowercase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowercase__ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 610 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter ( lowercase_ : str = "" , ) -> bool:
'''simple docstring'''
    return sum(c % 2 for c in Counter(lowercase_.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome ( lowercase_ : str = "" ) -> bool:
'''simple docstring'''
if len(lowercase_ ) == 0:
return True
    lower_case_input_str =lowercase_.replace(''' ''' , '''''' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict ={}
    for character in lower_case_input_str:
        character_freq_dict[character] =character_freq_dict.get(character , 0 ) + 1
    odd_char =0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def benchmark ( lowercase_ : str = "" ) -> None:
'''simple docstring'''
print('''\nFor string = ''' , lowercase_ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(lowercase_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(lowercase_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
_UpperCAmelCase : Tuple = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
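# Illustrative examples (added for clarity): a string can be rearranged into a palindrome
# exactly when at most one character has an odd count once spaces are ignored.
#   "carerac" -> counts {c: 2, a: 2, r: 2, e: 1} -> one odd count   -> True ("racecar")
#   "hello"   -> counts {h: 1, e: 1, l: 2, o: 1} -> three odd counts -> False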
| 145 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase ='''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase =Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('''RGB''' )
lowercase =transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
lowercase =transform(lowercase_ ).unsqueeze(0 ).to(lowercase_ )
return image
def UpperCamelCase ( lowercase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
if "visual_encoder" in key:
lowercase =re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowercase_ )
if "blocks" in key:
lowercase =re.sub(R'''blocks''' , '''layers''' , lowercase_ )
if "attn" in key:
lowercase =re.sub(R'''attn''' , '''self_attn''' , lowercase_ )
if "norm1" in key:
lowercase =re.sub(R'''norm1''' , '''layer_norm1''' , lowercase_ )
if "norm2" in key:
lowercase =re.sub(R'''norm2''' , '''layer_norm2''' , lowercase_ )
if "encoder.norm" in key:
lowercase =re.sub(R'''encoder.norm''' , '''post_layernorm''' , lowercase_ )
if "encoder.patch_embed.proj" in key:
lowercase =re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowercase_ )
if "encoder.pos_embed" in key:
lowercase =re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowercase_ )
if "encoder.cls_token" in key:
lowercase =re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowercase_ )
if "self_attn" in key:
lowercase =re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , lowercase_ )
return key
@torch.no_grad()
def UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : int=None ) -> str:
'''simple docstring'''
if config_path is not None:
lowercase =BlipConfig.from_pretrained(lowercase_ )
else:
lowercase =BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
lowercase =BlipForConditionalGeneration(lowercase_ ).eval()
lowercase ='''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase =blip_decoder(pretrained=lowercase_ , image_size=3_8_4 , vit='''base''' )
lowercase =pt_model.eval()
lowercase =pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase =modified_state_dict.pop(lowercase_ )
lowercase =rename_key(lowercase_ )
lowercase =value
hf_model.load_state_dict(lowercase_ )
lowercase =3_8_4
lowercase =load_demo_image(image_size=lowercase_ , device='''cpu''' )
lowercase =BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase =tokenizer(['''a picture of'''] ).input_ids
lowercase =hf_model.generate(lowercase_ , lowercase_ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
lowercase =hf_model.generate(lowercase_ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase =(
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase =blip_vqa(pretrained=lowercase_ , image_size=lowercase_ , vit='''base''' )
vqa_model.eval()
lowercase =vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase =modified_state_dict.pop(lowercase_ )
lowercase =rename_key(lowercase_ )
lowercase =value
lowercase =BlipForQuestionAnswering(lowercase_ )
hf_vqa_model.load_state_dict(lowercase_ )
lowercase =['''How many dogs are in this image?''']
lowercase =tokenizer(lowercase_ , return_tensors='''pt''' ).input_ids
lowercase =hf_vqa_model.generate(lowercase_ , lowercase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase ='''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase =blip_itm(pretrained=lowercase_ , image_size=lowercase_ , vit='''base''' )
itm_model.eval()
lowercase =itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase =modified_state_dict.pop(lowercase_ )
lowercase =rename_key(lowercase_ )
lowercase =value
lowercase =BlipForImageTextRetrieval(lowercase_ )
lowercase =['''A picture of a woman with a dog sitting in a beach''']
lowercase =tokenizer(
lowercase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowercase_ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase_ )
hf_itm_model.eval()
lowercase =hf_itm_model(lowercase_ , lowercase_ , use_itm_head=lowercase_ )
lowercase =hf_itm_model(lowercase_ , lowercase_ , use_itm_head=lowercase_ )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 145 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_A = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
_A = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode("""utf-8""").split()
_A = """|""".join(sys.argv[1:])
_A = re.compile(Rf'^({joined_dirs}).*?\.py$')
_A = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 158 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_A = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def A_ ( __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
for pegasus_name, hf_name in PATTERNS:
__SCREAMING_SNAKE_CASE : List[str] = k.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return k
def A_ ( __SCREAMING_SNAKE_CASE : dict , __SCREAMING_SNAKE_CASE : dict ) -> PegasusForConditionalGeneration:
__SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[Any] = PegasusForConditionalGeneration(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Tuple = torch_model.model.state_dict()
__SCREAMING_SNAKE_CASE : Dict = {}
for k, v in tf_weights.items():
__SCREAMING_SNAKE_CASE : List[str] = rename_state_dict_key(__SCREAMING_SNAKE_CASE )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
__SCREAMING_SNAKE_CASE : Dict = v.T
__SCREAMING_SNAKE_CASE : Any = torch.tensor(__SCREAMING_SNAKE_CASE , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
__SCREAMING_SNAKE_CASE : int = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__SCREAMING_SNAKE_CASE : Optional[Any] = mapping['''shared.weight''']
__SCREAMING_SNAKE_CASE : Tuple = mapping['''shared.weight''']
__SCREAMING_SNAKE_CASE : List[Any] = {k: torch.zeros_like(__SCREAMING_SNAKE_CASE ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch_model.model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def A_ ( __SCREAMING_SNAKE_CASE : Union[str, Any]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
__SCREAMING_SNAKE_CASE : Any = tf.train.list_variables(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[int] = {}
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(__SCREAMING_SNAKE_CASE , desc='''converting tf checkpoint to dict''' ):
__SCREAMING_SNAKE_CASE : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE : Any = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Dict = array
return tf_weights
def A_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
# save tokenizer first
__SCREAMING_SNAKE_CASE : List[str] = Path(__SCREAMING_SNAKE_CASE ).parent.name
__SCREAMING_SNAKE_CASE : Optional[int] = task_specific_params[f"""summarization_{dataset}"""]['''max_position_embeddings''']
__SCREAMING_SNAKE_CASE : List[str] = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=__SCREAMING_SNAKE_CASE )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__SCREAMING_SNAKE_CASE )
# convert model
__SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : str = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
__SCREAMING_SNAKE_CASE : Dict = task_specific_params
__SCREAMING_SNAKE_CASE : Optional[int] = convert_pegasus(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
torch_model.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(__SCREAMING_SNAKE_CASE , Path(__SCREAMING_SNAKE_CASE ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_A = parser.parse_args()
if args.save_dir is None:
_A = Path(args.tf_ckpt_path).parent.name
_A = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 158 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _a ( __lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = VideoMAEConfig()
set_architecture_configs(__lowercase , __lowercase )
if "finetuned" not in model_name:
__UpperCamelCase = False
if "finetuned" in model_name:
__UpperCamelCase = 'huggingface/label-files'
if "kinetics" in model_name:
__UpperCamelCase = 400
__UpperCamelCase = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
__UpperCamelCase = 174
__UpperCamelCase = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
__UpperCamelCase = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase = {int(__lowercase ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
def _a ( __lowercase , __lowercase ) -> Dict:
"""simple docstring"""
if "small" in model_name:
__UpperCamelCase = 384
__UpperCamelCase = 1536
__UpperCamelCase = 12
__UpperCamelCase = 16
__UpperCamelCase = 12
__UpperCamelCase = 3
__UpperCamelCase = 192
__UpperCamelCase = 768
elif "large" in model_name:
__UpperCamelCase = 1024
__UpperCamelCase = 4096
__UpperCamelCase = 24
__UpperCamelCase = 16
__UpperCamelCase = 12
__UpperCamelCase = 8
__UpperCamelCase = 512
__UpperCamelCase = 2048
elif "huge" in model_name:
__UpperCamelCase = 1280
__UpperCamelCase = 5120
__UpperCamelCase = 32
__UpperCamelCase = 16
__UpperCamelCase = 12
__UpperCamelCase = 8
__UpperCamelCase = 640
__UpperCamelCase = 2560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def _a ( __lowercase ) -> Tuple:
"""simple docstring"""
if "encoder." in name:
__UpperCamelCase = name.replace('encoder.' , '' )
if "cls_token" in name:
__UpperCamelCase = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
__UpperCamelCase = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
__UpperCamelCase = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
__UpperCamelCase = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__UpperCamelCase = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
__UpperCamelCase = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
__UpperCamelCase = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
__UpperCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
__UpperCamelCase = name.replace('attn' , 'attention.self' )
if "attn" in name:
__UpperCamelCase = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
__UpperCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__UpperCamelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__UpperCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__UpperCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
__UpperCamelCase = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
__UpperCamelCase = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
__UpperCamelCase = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__UpperCamelCase = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__UpperCamelCase = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
__UpperCamelCase = name.replace('head' , 'classifier' )
return name
def _a ( __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__UpperCamelCase = orig_state_dict.pop(__lowercase )
if key.startswith('encoder.' ):
__UpperCamelCase = key.replace('encoder.' , '' )
if "qkv" in key:
__UpperCamelCase = key.split('.' )
if key.startswith('decoder.blocks' ):
__UpperCamelCase = config.decoder_hidden_size
__UpperCamelCase = int(key_split[2] )
__UpperCamelCase = 'decoder.decoder_layers.'
if "weight" in key:
__UpperCamelCase = val[:dim, :]
__UpperCamelCase = val[dim : dim * 2, :]
__UpperCamelCase = val[-dim:, :]
else:
__UpperCamelCase = config.hidden_size
__UpperCamelCase = int(key_split[1] )
__UpperCamelCase = 'videomae.encoder.layer.'
if "weight" in key:
__UpperCamelCase = val[:dim, :]
__UpperCamelCase = val[dim : dim * 2, :]
__UpperCamelCase = val[-dim:, :]
else:
__UpperCamelCase = val
return orig_state_dict
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
__UpperCamelCase = np.load(__lowercase )
return list(__lowercase )
def _a ( __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = get_videomae_config(__lowercase )
if "finetuned" in model_name:
__UpperCamelCase = VideoMAEForVideoClassification(__lowercase )
else:
__UpperCamelCase = VideoMAEForPreTraining(__lowercase )
# download original checkpoint, hosted on Google Drive
__UpperCamelCase = 'pytorch_model.bin'
gdown.cached_download(__lowercase , __lowercase , quiet=__lowercase )
__UpperCamelCase = torch.load(__lowercase , map_location='cpu' )
if "model" in files:
__UpperCamelCase = files['model']
else:
__UpperCamelCase = files['module']
__UpperCamelCase = convert_state_dict(__lowercase , __lowercase )
model.load_state_dict(__lowercase )
model.eval()
# verify model on basic input
__UpperCamelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__UpperCamelCase = prepare_video()
__UpperCamelCase = image_processor(__lowercase , return_tensors='pt' )
if "finetuned" not in model_name:
__UpperCamelCase = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
__UpperCamelCase = torch.load(__lowercase )
__UpperCamelCase = model(**__lowercase )
__UpperCamelCase = outputs.logits
__UpperCamelCase = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__UpperCamelCase = torch.Size([1, 400] )
__UpperCamelCase = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
__UpperCamelCase = torch.Size([1, 174] )
__UpperCamelCase = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
__UpperCamelCase = torch.Size([1, 1408, 1536] )
__UpperCamelCase = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
__UpperCamelCase = torch.Size([1, 1408, 1536] )
__UpperCamelCase = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
__UpperCamelCase = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
__UpperCamelCase = torch.Size([1, 1408, 1536] )
__UpperCamelCase = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
__UpperCamelCase = torch.Size([1, 400] )
__UpperCamelCase = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
__UpperCamelCase = torch.Size([1, 400] )
__UpperCamelCase = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__UpperCamelCase = torch.Size([1, 400] )
__UpperCamelCase = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
__UpperCamelCase = torch.Size([1, 400] )
__UpperCamelCase = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
__UpperCamelCase = torch.Size([1, 1408, 1536] )
__UpperCamelCase = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__UpperCamelCase = torch.Size([1, 174] )
__UpperCamelCase = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
__UpperCamelCase = torch.Size([1, 1408, 1536] )
__UpperCamelCase = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
__UpperCamelCase = torch.Size([1, 174] )
__UpperCamelCase = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
__UpperCamelCase = outputs.loss
assert torch.allclose(__lowercase , __lowercase , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowercase )
model.save_pretrained(__lowercase )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(__lowercase , organization='nielsr' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 567 |
def multiplication_table ( number , number_of_terms ) -> str:
"""simple docstring"""
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
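# Expected output sketch for the call above (assuming the corrected signature):
#   5 * 1 = 5
#   5 * 2 = 10
#   ...
#   5 * 10 = 50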
| 567 | 1 |
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_lowerCamelCase : Dict = 'Create a default config file for Accelerate with only a few flags set.'
def _lowerCAmelCase ( __magic_name__ :str="no" , __magic_name__ :Dict = default_json_config_file , __magic_name__ :List[str] = False ):
UpperCAmelCase_ = Path(lowercase__ )
path.parent.mkdir(parents=lowercase__ , exist_ok=lowercase__ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
UpperCAmelCase_ = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
UpperCAmelCase_ = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase_ = torch.cuda.device_count()
UpperCAmelCase_ = num_gpus
UpperCAmelCase_ = False
if num_gpus > 1:
UpperCAmelCase_ = 'MULTI_GPU'
else:
UpperCAmelCase_ = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase_ = torch.xpu.device_count()
UpperCAmelCase_ = num_xpus
UpperCAmelCase_ = False
if num_xpus > 1:
UpperCAmelCase_ = 'MULTI_XPU'
else:
UpperCAmelCase_ = 'NO'
elif is_npu_available():
UpperCAmelCase_ = torch.npu.device_count()
UpperCAmelCase_ = num_npus
UpperCAmelCase_ = False
if num_npus > 1:
UpperCAmelCase_ = 'MULTI_NPU'
else:
UpperCAmelCase_ = 'NO'
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = True
UpperCAmelCase_ = 1
UpperCAmelCase_ = 'NO'
UpperCAmelCase_ = ClusterConfig(**lowercase__ )
config.to_json_file(lowercase__ )
return path
def _lowerCAmelCase ( __magic_name__ :int , __magic_name__ :List[Any] ):
UpperCAmelCase_ = parser.add_parser('''default''' , parents=lowercase__ , help=lowercase__ , formatter_class=lowercase__ )
parser.add_argument(
'''--config_file''' , default=lowercase__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=lowercase__ , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=lowercase__ )
return parser
def _lowerCAmelCase ( __magic_name__ :Dict ):
UpperCAmelCase_ = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
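# Example of the JSON this command writes on a single-GPU machine (a sketch; the exact set of
# keys comes from `ClusterConfig.to_json_file`, so field names may differ slightly):
#
#     {
#         "compute_environment": "LOCAL_MACHINE",
#         "distributed_type": "NO",
#         "mixed_precision": "no",
#         "num_processes": 1,
#         "use_cpu": false
#     }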
| 121 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=64 , lowercase=5 , lowercase=4 , lowercase=64 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
_lowerCamelCase : List[str] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : List[Any] = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Optional[int] = type_sequence_label_size
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Tuple = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : int = scope
def A_ ( self ):
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def A_ ( self ):
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_input_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Any = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = MPNetModel(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Tuple = model(lowercase , lowercase )
_lowerCamelCase : Tuple = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : str = MPNetForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(
lowercase , attention_mask=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : int = MPNetForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[str] = self.num_choices
_lowerCamelCase : List[str] = MPNetForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Any = model(
lowercase , attention_mask=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : Union[str, Any] = MPNetForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self ):
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
((_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase)) : List[Any] = config_and_inputs
_lowerCamelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = True
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = MPNetModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase )
def A_ ( self ):
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Optional[Any] = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCamelCase : Any = model(lowercase )[0]
_lowerCamelCase : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[Any] = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
 | 630 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
__lowerCamelCase : int = {
'''camembert-base''': 5_12,
}
__lowerCamelCase : Any = '''▁'''
class a__ ( _UpperCamelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
A = CamembertTokenizer
def __init__( self : List[Any],_A : int=None,_A : str=None,_A : Optional[int]="<s>",_A : Dict="</s>",_A : List[Any]="</s>",_A : List[str]="<s>",_A : List[str]="<unk>",_A : List[Any]="<pad>",_A : List[Any]="<mask>",_A : Tuple=["<s>NOTUSED", "</s>NOTUSED"],**_A : Optional[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = AddedToken(__a,lstrip=__a,rstrip=__a ) if isinstance(__a,__a ) else mask_token
super().__init__(
__a,tokenizer_file=__a,bos_token=__a,eos_token=__a,sep_token=__a,cls_token=__a,unk_token=__a,pad_token=__a,mask_token=__a,additional_special_tokens=__a,**__a,)
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE_ : Tuple = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        # RoBERTa/CamemBERT format: <s> A </s> for a single sequence, <s> A </s></s> B </s> for a pair
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        # CamemBERT, like RoBERTa, does not use token type ids, so the mask is all zeros for single sequences and pairs alike
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file,out_vocab_file )
return (out_vocab_file,)
| 707 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
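# The import structure declared below is resolved lazily: the _LazyModule registered at the bottom of the file
# only imports each submodule on first attribute access.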
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
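    # replace this module in sys.modules with a lazy proxy so the heavy torch imports only happen on first use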
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 316 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
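    # torch_xla's metric report is only used for the optional TPU debug logging inside evaluate()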
class a__ ( SeqaSeqTrainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs):
        """simple docstring"""
        super().__init__(*args , **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset: Optional[Dataset] = None , eval_examples=None , ignore_keys: Optional[List[str]] = None , metric_key_prefix: str = "eval" , **gen_kwargs , ):
"""simple docstring"""
__UpperCAmelCase : str = gen_kwargs.copy()
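        # generation length and beam count fall back to the defaults from the training arguments when not passed explicitly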
__UpperCAmelCase : Tuple = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
)
__UpperCAmelCase : List[str] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
)
__UpperCAmelCase : Union[str, Any] = gen_kwargs
__UpperCAmelCase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
__UpperCAmelCase : List[Any] = self.get_eval_dataloader(UpperCamelCase_)
__UpperCAmelCase : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase : Optional[int] = self.compute_metrics
__UpperCAmelCase : str = None
__UpperCAmelCase : List[str] = time.time()
__UpperCAmelCase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase : Dict = eval_loop(
UpperCamelCase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
__UpperCAmelCase : Optional[Any] = compute_metrics
__UpperCAmelCase : List[str] = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__UpperCAmelCase : int = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : List[Any] = self.compute_metrics(UpperCamelCase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__UpperCAmelCase : List[Any] = metrics.pop(UpperCamelCase_)
metrics.update(output.metrics)
else:
__UpperCAmelCase : Tuple = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__UpperCAmelCase : Dict = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_)
return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" , **gen_kwargs):
"""simple docstring"""
__UpperCAmelCase : Dict = gen_kwargs.copy()
__UpperCAmelCase : Dict = self.get_test_dataloader(UpperCamelCase_)
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase : Optional[int] = self.compute_metrics
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : str = time.time()
__UpperCAmelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase : List[str] = eval_loop(
UpperCamelCase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
__UpperCAmelCase : Optional[Any] = compute_metrics
__UpperCAmelCase : Optional[int] = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
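        # without a post-processing function and a metrics function there is nothing more to compute, so return the raw loop output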
if self.post_process_function is None or self.compute_metrics is None:
return output
__UpperCAmelCase : Optional[Any] = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , "predict")
__UpperCAmelCase : Optional[int] = self.compute_metrics(UpperCamelCase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__UpperCAmelCase : Union[str, Any] = metrics.pop(UpperCamelCase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_)
| 77 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def create_rename_keys( config ,base_model=False ):
    '''simple docstring'''
    rename_keys = []
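    # each timm transformer block "blocks.{i}.*" is mapped onto the HF layout "deit.encoder.layer.{i}.*" built up below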
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict ,config ,base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def rename_key( dct ,old ,new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name ,pytorch_dump_folder_path ):
'''simple docstring'''
    config = DeiTConfig()
# all deit models have fine-tuned heads
A__ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A__ = 1000
A__ = 'huggingface/label-files'
A__ = 'imagenet-1k-id2label.json'
A__ = json.load(open(hf_hub_download(UpperCAmelCase ,UpperCAmelCase ,repo_type='dataset' ) ,'r' ) )
A__ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = int(deit_name[-6:-4] )
A__ = int(deit_name[-3:] )
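    # e.g. "vit_deit_base_distilled_patch16_224": the patch size (16) and image size (224) are parsed from the checkpoint name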
# size of the architecture
if deit_name[9:].startswith('tiny' ):
A__ = 192
A__ = 768
A__ = 12
A__ = 3
elif deit_name[9:].startswith('small' ):
A__ = 384
A__ = 1536
A__ = 12
A__ = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
A__ = 1024
A__ = 4096
A__ = 24
A__ = 16
# load original model from timm
A__ = timm.create_model(UpperCAmelCase ,pretrained=UpperCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ = timm_model.state_dict()
A__ = create_rename_keys(UpperCAmelCase ,UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
# load HuggingFace model
A__ = DeiTForImageClassificationWithTeacher(UpperCAmelCase ).eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
A__ = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A__ = DeiTImageProcessor(size=UpperCAmelCase ,crop_size=config.image_size )
A__ = image_processor(images=prepare_img() ,return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(UpperCAmelCase )
A__ = timm_model(UpperCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase ,outputs.logits ,atol=1e-3 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 531 | 0 |
'''simple docstring'''
from math import pi
def arc_length( angle , radius ):
    """simple docstring"""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 172 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _A :
'''simple docstring'''
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] )-> Dict:
pass
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
_lowercase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] )-> Optional[int]:
snake_case__ : Union[str, Any] = pipeline(
"""document-question-answering""" , model=lowerCamelCase , tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : int = INVOICE_URL
snake_case__ : List[Any] = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
snake_case__ : Dict = """What is the placebo?"""
snake_case__ : int = [
{
"""image""": load_image(lowerCamelCase ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def __lowerCAmelCase ( self : int , lowerCamelCase : str , lowerCamelCase : List[str] )-> Union[str, Any]:
snake_case__ : List[Any] = dqa_pipeline(lowerCamelCase , top_k=2 )
self.assertEqual(
lowerCamelCase , [
[
{"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
{"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self : int )-> List[Any]:
snake_case__ : List[Any] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
snake_case__ : int = INVOICE_URL
snake_case__ : List[Any] = """How many cats are there?"""
snake_case__ : Optional[int] = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
snake_case__ : str = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
snake_case__ : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case__ : Union[str, Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(lowerCamelCase , [] )
# We can optionnally pass directly the words and bounding boxes
snake_case__ : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case__ : Optional[int] = []
snake_case__ : List[Any] = []
snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , words=lowerCamelCase , boxes=lowerCamelCase , top_k=2 )
self.assertEqual(lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self : int )-> Any:
snake_case__ : List[str] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
snake_case__ : List[Any] = INVOICE_URL
snake_case__ : Optional[Any] = """What is the invoice number?"""
snake_case__ : Any = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : Dict = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self : List[Any] )-> Any:
snake_case__ : Optional[Any] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
snake_case__ : Dict = INVOICE_URL
snake_case__ : Tuple = """What is the invoice number?"""
snake_case__ : Optional[Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : List[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self : List[str] )-> Dict:
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
snake_case__ : Tuple = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , )
snake_case__ : Optional[int] = INVOICE_URL
snake_case__ : Union[str, Any] = """What is the invoice number?"""
snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
snake_case__ : int = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
snake_case__ : Tuple = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
# This model should also work if `image` is set to None
snake_case__ : Dict = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self : int )-> str:
snake_case__ : Dict = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
snake_case__ : List[Any] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , max_seq_len=50 , )
snake_case__ : Any = INVOICE_URL
snake_case__ : List[Any] = """What is the invoice number?"""
snake_case__ : int = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
snake_case__ : Dict = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
# This model should also work if `image` is set to None
snake_case__ : Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def __lowerCAmelCase ( self : int )-> Tuple:
snake_case__ : Optional[int] = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
snake_case__ : str = INVOICE_URL
snake_case__ : Tuple = """What is the invoice number?"""
snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def __lowerCAmelCase ( self : int )-> List[Any]:
pass
| 172 | 1 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class A (FeatureExtractionSavingTestMixin ):
'''simple docstring'''
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict( self ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def a_ ( self : Optional[int] ) -> str:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """feature_size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """padding_value""" ) )
def a_ ( self : str ) -> int:
"""simple docstring"""
A__ = self.feat_extract_tester.prepare_inputs_for_common()
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__lowerCAmelCase ) == len(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase , processed_features[input_name] ) ) )
A__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__lowerCAmelCase )
A__ = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
A__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A__ = batch_features_input[:, :, None]
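            # add a trailing channel axis so 1-D features compare against (batch_size, seq_len, feature_size)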
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def a_ ( self : int ) -> Tuple:
"""simple docstring"""
A__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__lowerCAmelCase )
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
A__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def a_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
A__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__lowerCAmelCase )
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
A__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def a_ ( self : int , __lowerCAmelCase : List[Any]=False ) -> int:
"""simple docstring"""
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 , input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) , np.asarray(input_slice_2 ) , atol=1e-3 ):
                    return False
            return True
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=__lowerCAmelCase )
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
A__ = self.feat_extract_tester.seq_length_diff
A__ = self.feat_extract_tester.max_seq_length + pad_diff
A__ = self.feat_extract_tester.min_seq_length
A__ = self.feat_extract_tester.batch_size
A__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
A__ = feat_extract.pad(__lowerCAmelCase , padding=__lowerCAmelCase )
A__ = input_a[input_name]
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" )
A__ = input_a[input_name]
A__ = feat_extract.pad(__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
A__ = input_a[input_name]
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
A__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__lowerCAmelCase ):
feat_extract.pad(__lowerCAmelCase , padding="""max_length""" )[input_name]
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=__lowerCAmelCase , return_tensors="""np""" )
A__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
A__ = feat_extract.pad(__lowerCAmelCase , pad_to_multiple_of=10 )
A__ = input_a[input_name]
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" , pad_to_multiple_of=10 )
A__ = input_a[input_name]
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=__lowerCAmelCase )
A__ = input_a[input_name]
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=__lowerCAmelCase , return_tensors="""np""" , )
A__ = input_a[input_name]
self.assertTrue(all(len(__lowerCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__lowerCAmelCase , __lowerCAmelCase ) )
A__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
A__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def a_ ( self : Tuple , __lowerCAmelCase : List[str]=False ) -> int:
"""simple docstring"""
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 , input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) , np.asarray(input_slice_2 ) , atol=1e-3 ):
                    return False
            return True
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=__lowerCAmelCase )
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=__lowerCAmelCase )
A__ = input_a[input_name]
A__ = feat_extract.pad(__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
A__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(__lowerCAmelCase ) )
# truncate to smallest with np
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=__lowerCAmelCase , )
A__ = input_a[input_name]
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
A__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__lowerCAmelCase ) )
# truncate to middle
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=__lowerCAmelCase , return_tensors="""np""" , )
A__ = input_a[input_name]
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=__lowerCAmelCase )
A__ = input_a[input_name]
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
A__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(__lowerCAmelCase , __lowerCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__lowerCAmelCase ):
feat_extract.pad(__lowerCAmelCase , truncation=__lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__lowerCAmelCase ):
feat_extract.pad(__lowerCAmelCase , padding="""longest""" , truncation=__lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__lowerCAmelCase ):
feat_extract.pad(__lowerCAmelCase , padding="""longest""" , truncation=__lowerCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__lowerCAmelCase ):
feat_extract.pad(__lowerCAmelCase , padding="""max_length""" , truncation=__lowerCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
A__ = 12
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__lowerCAmelCase , truncation=__lowerCAmelCase , )
A__ = input_a[input_name]
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__lowerCAmelCase , )
A__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
A__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
A__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(__lowerCAmelCase ) )
def a_ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._check_padding(numpify=__lowerCAmelCase )
def a_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._check_padding(numpify=__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self._check_truncation(numpify=__lowerCAmelCase )
def a_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self._check_truncation(numpify=__lowerCAmelCase )
@require_torch
def a_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = self.feat_extract_tester.prepare_inputs_for_common()
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def a_ ( self : str ) -> Dict:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = self.feat_extract_tester.prepare_inputs_for_common()
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def a_ ( self : Dict ) -> Dict:
"""simple docstring"""
A__ = self.feat_extract_dict
A__ = True
A__ = self.feature_extraction_class(**__lowerCAmelCase )
A__ = self.feat_extract_tester.prepare_inputs_for_common()
A__ = [len(__lowerCAmelCase ) for x in speech_inputs]
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
A__ = feat_extract.pad(__lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
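        # with return_attention_mask enabled, each row of the mask must sum to the unpadded input length checked below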
self.assertIn("""attention_mask""" , __lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __lowerCAmelCase )
def a_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
A__ = self.feat_extract_dict
A__ = True
A__ = self.feature_extraction_class(**__lowerCAmelCase )
A__ = self.feat_extract_tester.prepare_inputs_for_common()
A__ = [len(__lowerCAmelCase ) for x in speech_inputs]
A__ = feat_extract.model_input_names[0]
A__ = BatchFeature({input_name: speech_inputs} )
A__ = min(__lowerCAmelCase )
A__ = feat_extract.pad(
__lowerCAmelCase , padding="""max_length""" , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 176 |
def actual_power( a: int , b: int ):
    """simple docstring"""
    # exponentiation by squaring: halve the exponent at every recursive step
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a: int , b: int ) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
| 176 | 1 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ):
    # load the base diffusers pipeline
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # find the target layer by walking the module tree one name segment at a time
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down" , "lora_up" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("lora_up" , "lora_down" ) )
        # update weight: W <- W + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 715 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        # element-wise string equality; the score is the percentage of exactly matching pairs
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 666 | 0 |
'''simple docstring'''
from __future__ import annotations
class _a :
'''simple docstring'''
    def __init__( self ,key = 0 ) -> None:
        self.__key = key
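        # a key of 0 is treated as "no instance key"; the encrypt/decrypt methods then fall back to the per-call key or 1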
def snake_case_ ( self ,__a ,__a ) -> list[str]:
assert isinstance(__a ,__a ) and isinstance(__a ,__a )
snake_case : Tuple = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__a ) ^ key ) for ch in content]
def snake_case_ ( self ,__a ,__a ) -> list[str]:
assert isinstance(__a ,__a ) and isinstance(__a ,__a )
snake_case : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__a ) ^ key ) for ch in content]
def snake_case_ ( self ,__a ,__a = 0 ) -> str:
assert isinstance(__a ,__a ) and isinstance(__a ,__a )
snake_case : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
snake_case : List[str] = """"""
for ch in content:
ans += chr(ord(__a ) ^ key )
return ans
def snake_case_ ( self ,__a ,__a = 0 ) -> str:
assert isinstance(__a ,__a ) and isinstance(__a ,__a )
snake_case : Any = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
snake_case : List[str] = """"""
for ch in content:
ans += chr(ord(__a ) ^ key )
return ans
def snake_case_ ( self ,__a ,__a = 0 ) -> bool:
assert isinstance(__a ,__a ) and isinstance(__a ,__a )
try:
with open(__a ) as fin, open("""encrypt.out""" ,"""w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__a ,__a ) )
except OSError:
return False
return True
def snake_case_ ( self ,__a ,__a ) -> bool:
assert isinstance(__a ,__a ) and isinstance(__a ,__a )
try:
with open(__a ) as fin, open("""decrypt.out""" ,"""w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(__a ,__a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 116 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : Any = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Tuple:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Dict:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> str:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : Any = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> List[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Any:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> int:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : int = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Optional[int]:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> List[str]:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : Dict = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> List[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> int:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> str:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=a__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=a__ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Dict:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=a__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Tuple:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=a__ ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> Dict:
requires_backends(cls ,["""flax"""] )
class _a (metaclass=a__ ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = ["""flax"""]
def __init__( self ,*__a ,**__a ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def snake_case_ ( cls ,*__a ,**__a ) -> int:
requires_backends(cls ,["""flax"""] )
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase_ ( A ):
def __init__( self , *__A , __A=None , __A=None , **__A ) -> List[Any]:
super().__init__(*__A , **__A )
SCREAMING_SNAKE_CASE_ : Any =eval_examples
SCREAMING_SNAKE_CASE_ : List[str] =post_process_function
def _snake_case ( self , __A = None , __A=None , __A = None , __A = "eval" , **__A , ) -> Dict[str, float]:
SCREAMING_SNAKE_CASE_ : str =gen_kwargs.copy()
SCREAMING_SNAKE_CASE_ : Any =(
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
SCREAMING_SNAKE_CASE_ : Optional[int] =(
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
SCREAMING_SNAKE_CASE_ : List[str] =gen_kwargs
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_ : List[Any] =self.get_eval_dataloader(__A )
SCREAMING_SNAKE_CASE_ : str =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : Tuple =self.compute_metrics
SCREAMING_SNAKE_CASE_ : str =None
SCREAMING_SNAKE_CASE_ : Optional[int] =time.time()
SCREAMING_SNAKE_CASE_ : Dict =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : Any =eval_loop(
__A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , metric_key_prefix=__A , )
finally:
SCREAMING_SNAKE_CASE_ : Optional[int] =compute_metrics
SCREAMING_SNAKE_CASE_ : Dict =self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__A , __A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE_ : Tuple =self.post_process_function(__A , __A , __A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ : Dict =metrics.pop(__A )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.callback_handler.on_evaluate(self.args , self.state , self.control , __A )
return metrics
def _snake_case ( self , __A , __A , __A=None , __A = "test" , **__A ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =gen_kwargs.copy()
SCREAMING_SNAKE_CASE_ : List[str] =self.get_test_dataloader(__A )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : List[Any] =self.compute_metrics
SCREAMING_SNAKE_CASE_ : Any =None
SCREAMING_SNAKE_CASE_ : Optional[int] =time.time()
SCREAMING_SNAKE_CASE_ : Optional[int] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : List[str] =eval_loop(
__A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , metric_key_prefix=__A , )
finally:
SCREAMING_SNAKE_CASE_ : Optional[int] =compute_metrics
SCREAMING_SNAKE_CASE_ : Dict =self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__A , __A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_ : Dict =self.post_process_function(__A , __A , __A , '''predict''' )
SCREAMING_SNAKE_CASE_ : Tuple =self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ : List[str] =metrics.pop(__A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A )
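# Rough usage sketch only: upstream this subclass corresponds to the examples'
# QuestionAnsweringSeq2SeqTrainer, and every variable name below is hypothetical.
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,                 # raw examples consumed by post_process_function
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
#     tokenizer=tokenizer,
# )
# metrics = trainer.evaluate(max_length=max_answer_length, num_beams=num_beams)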
import copy
import re
class lowercase_ :
__lowerCamelCase = "hp"
__lowerCamelCase = {}
__lowerCamelCase = None
@classmethod
def _snake_case ( cls , __A , __A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Dict =prefix
SCREAMING_SNAKE_CASE_ : Optional[Any] =defaults
cls.build_naming_info()
@staticmethod
def _snake_case ( __A , __A ) -> str:
if len(__A ) == 0:
return ""
SCREAMING_SNAKE_CASE_ : Any =None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__A ) + 1 ):
SCREAMING_SNAKE_CASE_ : List[str] =word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE_ : int =prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =''''''
while integer != 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
SCREAMING_SNAKE_CASE_ : Dict =0
while True:
SCREAMING_SNAKE_CASE_ : Optional[int] =word + '''#''' + int_to_alphabetic(__A )
if sword in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE_ : Tuple =sword
break
SCREAMING_SNAKE_CASE_ : List[str] =short_word
SCREAMING_SNAKE_CASE_ : str =word
return short_word
@staticmethod
def _snake_case ( __A , __A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : str =param_name.split('''_''' )
SCREAMING_SNAKE_CASE_ : str =[TrialShortNamer.shortname_for_word(__A , __A ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
SCREAMING_SNAKE_CASE_ : Tuple =['''''', '''_''']
for separator in separators:
SCREAMING_SNAKE_CASE_ : Dict =separator.join(__A )
if shortname not in info["reverse_short_param"]:
SCREAMING_SNAKE_CASE_ : List[str] =shortname
SCREAMING_SNAKE_CASE_ : Any =param_name
return shortname
return param_name
@staticmethod
def _snake_case ( __A , __A ) -> int:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =TrialShortNamer.shortname_for_key(__A , __A )
SCREAMING_SNAKE_CASE_ : Any =short_name
SCREAMING_SNAKE_CASE_ : Dict =param_name
@classmethod
def _snake_case ( cls ) -> Optional[int]:
if cls.NAMING_INFO is not None:
return
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
SCREAMING_SNAKE_CASE_ : Tuple =list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__A , __A )
SCREAMING_SNAKE_CASE_ : str =info
@classmethod
def _snake_case ( cls , __A ) -> List[str]:
cls.build_naming_info()
assert cls.PREFIX is not None
SCREAMING_SNAKE_CASE_ : int =[copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
SCREAMING_SNAKE_CASE_ : Optional[int] =cls.NAMING_INFO['''short_param'''][k]
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE_ : List[Any] =1 if v else 0
SCREAMING_SNAKE_CASE_ : List[Any] ='''''' if isinstance(__A , (int, float) ) else '''-'''
SCREAMING_SNAKE_CASE_ : Optional[Any] =F'{key}{sep}{v}'
name.append(__A )
return "_".join(__A )
@classmethod
def _snake_case ( cls , __A ) -> Dict:
SCREAMING_SNAKE_CASE_ : Optional[int] =repr[len(cls.PREFIX ) + 1 :]
if repr == "":
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[]
else:
SCREAMING_SNAKE_CASE_ : Tuple =repr.split('''_''' )
SCREAMING_SNAKE_CASE_ : List[Any] ={}
for value in values:
if "-" in value:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =value.split('''-''' )
else:
SCREAMING_SNAKE_CASE_ : Any =re.sub('''[0-9.]''' , '''''' , __A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =float(re.sub('''[^0-9.]''' , '''''' , __A ) )
SCREAMING_SNAKE_CASE_ : List[Any] =cls.NAMING_INFO['''reverse_short_param'''][p_k]
SCREAMING_SNAKE_CASE_ : Optional[Any] =p_v
for k in cls.DEFAULTS:
if k not in parameters:
SCREAMING_SNAKE_CASE_ : Tuple =cls.DEFAULTS[k]
return parameters
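# Rough usage sketch: the classmethods above correspond to the upstream TrialShortNamer
# helper's shortname / parse_repr; the subclass, prefix and hyper-parameter values below
# are made up for illustration.
#
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 5e-5, "num_train_epochs": 3}
#
# name = RunNamer.shortname({"learning_rate": 1e-4, "num_train_epochs": 3})
# params = RunNamer.parse_repr(name)  # missing keys fall back to DEFAULTS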
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the coefficient used in
    Newton's forward interpolation formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class _A ( UpperCAmelCase_ ):
def __init__( self : Tuple , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[Any] ):
"""simple docstring"""
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
requires_backends(self , """decord""" )
self.check_model_type(lowerCamelCase__ )
def a ( self : Tuple , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Tuple=None ):
"""simple docstring"""
__UpperCamelCase : Tuple = {}
if frame_sampling_rate is not None:
__UpperCamelCase : Dict = frame_sampling_rate
if num_frames is not None:
__UpperCamelCase : Optional[int] = num_frames
__UpperCamelCase : Dict = {}
if top_k is not None:
__UpperCamelCase : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Any , lowerCamelCase__ : Union[str, List[str]] , **lowerCamelCase__ : Any ):
"""simple docstring"""
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def a ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Tuple=1 ):
"""simple docstring"""
if num_frames is None:
__UpperCamelCase : Union[str, Any] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__UpperCamelCase : Tuple = BytesIO(requests.get(lowerCamelCase__ ).content )
__UpperCamelCase : Dict = VideoReader(lowerCamelCase__ )
videoreader.seek(0 )
__UpperCamelCase : Optional[int] = 0
__UpperCamelCase : Any = num_frames * frame_sampling_rate - 1
__UpperCamelCase : Dict = np.linspace(lowerCamelCase__ , lowerCamelCase__ , num=lowerCamelCase__ , dtype=np.intaa )
__UpperCamelCase : Any = videoreader.get_batch(lowerCamelCase__ ).asnumpy()
__UpperCamelCase : Dict = list(lowerCamelCase__ )
__UpperCamelCase : List[str] = self.image_processor(lowerCamelCase__ , return_tensors=self.framework )
return model_inputs
def a ( self : Dict , lowerCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : str = self.model(**lowerCamelCase__ )
return model_outputs
def a ( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
__UpperCamelCase : Optional[Any] = self.model.config.num_labels
if self.framework == "pt":
__UpperCamelCase : str = model_outputs.logits.softmax(-1 )[0]
__UpperCamelCase , __UpperCamelCase : Any = probs.topk(lowerCamelCase__ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
__UpperCamelCase : List[Any] = scores.tolist()
__UpperCamelCase : Optional[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase__ , lowerCamelCase__ )]
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """
    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : List[Any] = MobileBertTokenizer
__magic_name__ : str = MobileBertTokenizerFast
__magic_name__ : Optional[int] = True
__magic_name__ : List[Any] = True
__magic_name__ : Dict = filter_non_english
__magic_name__ : str = "google/mobilebert-uncased"
def a__( self : Dict )-> Any:
"""simple docstring"""
super().setUp()
UpperCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCAmelCase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def a__( self : Any , lowerCAmelCase : Tuple )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''UNwant\u00E9d,running'''
UpperCAmelCase = '''unwanted, running'''
return input_text, output_text
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer_class(self.vocab_file )
UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__( self : str )-> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''UNwant\u00E9d,running'''
UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# With lower casing
UpperCAmelCase = self.get_tokenizer(do_lower_case=lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase )
UpperCAmelCase = '''UNwant\u00E9d,running'''
UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : str )-> int:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def a__( self : Dict )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def a__( self : Dict )-> Dict:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__( self : Tuple )-> int:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__( self : str )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def a__( self : Any )-> int:
"""simple docstring"""
UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCAmelCase = {}
for i, token in enumerate(lowerCAmelCase ):
UpperCAmelCase = i
UpperCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def a__( self : Optional[int] )-> int:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def a__( self : Any )-> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase = tokenizer_r.encode_plus(
lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase , )
UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase , '''do_lower_case''' ) else False
UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = ['''的''', '''人''', '''有''']
UpperCAmelCase = ''''''.join(lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = True
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = False
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase )
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    """Return the length of the longest common subsequence of ``x`` and ``y``
    together with one such subsequence.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # walk back through the table to recover one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger()
# the current default level is logging.WARNING
SCREAMING_SNAKE_CASE_ : Optional[int] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = logging.get_verbosity()
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
SCREAMING_SNAKE_CASE_ : List[Any] = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowercase__ ) as cl:
logger.warning(lowercase__ )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowercase__ ) as cl:
logger.warning(lowercase__ )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowercase__ ) as cl:
logger.warning(lowercase__ )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(lowercase__ )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def __lowerCamelCase ( self ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger("transformers.models.bart.tokenization_bart" )
SCREAMING_SNAKE_CASE_ : int = os.getenv("TRANSFORMERS_VERBOSITY" , lowercase__ )
SCREAMING_SNAKE_CASE_ : Dict = logging.log_levels[env_level_str]
SCREAMING_SNAKE_CASE_ : str = logging.get_verbosity()
self.assertEqual(
lowercase__ , lowercase__ , F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
# restore to the original level
SCREAMING_SNAKE_CASE_ : Optional[int] = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def __lowerCamelCase ( self ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE_ : List[Any] = logging.logging.getLogger()
with CaptureLogger(lowercase__ ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def __lowerCamelCase ( self ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE_ : str = logging.get_logger("transformers.models.bart.tokenization_bart" )
SCREAMING_SNAKE_CASE_ : List[Any] = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowercase__ ) as cl:
logger.warning_advice(lowercase__ )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowercase__ ) as cl:
logger.warning_advice(lowercase__ )
self.assertEqual(cl.out , msg + "\n" )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
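# The script takes two CLI arguments: the path of the benchmark-results JSON and the path of
# the Markdown file to write. Illustrative input shape only (benchmark and metric names are
# made up; "new"/"old"/"diff" are the keys actually read above):
#
# {
#     "benchmarks/benchmark_map.json": {
#         "load_elapsed_time_s": {"new": 1.23, "old": 1.50, "diff": -0.27}
#     }
# }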
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE_ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE_ = {
"facebook/nllb-large-en-ro": 1_024,
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
SCREAMING_SNAKE_CASE_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE__ : Optional[int] = NllbTokenizer
SCREAMING_SNAKE_CASE__ : List[int] = []
SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self : Dict , snake_case : Union[str, Any]=None , snake_case : Union[str, Any]=None , snake_case : Dict="<s>" , snake_case : Optional[int]="</s>" , snake_case : str="</s>" , snake_case : Dict="<s>" , snake_case : Tuple="<unk>" , snake_case : List[str]="<pad>" , snake_case : Union[str, Any]="<mask>" , snake_case : Dict=None , snake_case : List[Any]=None , snake_case : Optional[Any]=None , snake_case : Union[str, Any]=False , **snake_case : List[Any] , ):
"""simple docstring"""
_snake_case : List[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
_snake_case : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=snake_case , tokenizer_file=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , src_lang=snake_case , tgt_lang=snake_case , additional_special_tokens=snake_case , legacy_behaviour=snake_case , **snake_case , )
_snake_case : Dict = vocab_file
_snake_case : List[str] = False if not self.vocab_file else True
_snake_case : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_snake_case : Tuple = src_lang if src_lang is not None else 'eng_Latn'
_snake_case : Tuple = self.convert_tokens_to_ids(self._src_lang )
_snake_case : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self : List[str] , snake_case : str ):
"""simple docstring"""
_snake_case : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self : Optional[int] , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self : List[str] , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
"""simple docstring"""
_snake_case : str = [self.sep_token_id]
_snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[Any] , snake_case : Union[str, Any] , snake_case : str , snake_case : Optional[str] , snake_case : Optional[str] , **snake_case : Any ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_snake_case : Optional[Any] = src_lang
_snake_case : str = self(snake_case , add_special_tokens=snake_case , return_tensors=snake_case , **snake_case )
_snake_case : Dict = self.convert_tokens_to_ids(snake_case )
_snake_case : Tuple = tgt_lang_id
return inputs
def __UpperCAmelCase ( self : Optional[Any] , snake_case : List[str] , snake_case : str = "eng_Latn" , snake_case : Optional[List[str]] = None , snake_case : str = "fra_Latn" , **snake_case : Any , ):
"""simple docstring"""
_snake_case : Dict = src_lang
_snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(snake_case , snake_case , **snake_case )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self : int , snake_case : Any ):
"""simple docstring"""
_snake_case : int = self.convert_tokens_to_ids(snake_case )
if self.legacy_behaviour:
_snake_case : Any = []
_snake_case : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
_snake_case : int = [self.cur_lang_code]
_snake_case : List[str] = [self.eos_token_id]
_snake_case : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_snake_case : str = self.convert_ids_to_tokens(self.suffix_tokens )
_snake_case : Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : int , snake_case : str ):
"""simple docstring"""
_snake_case : Any = self.convert_tokens_to_ids(snake_case )
if self.legacy_behaviour:
_snake_case : Dict = []
_snake_case : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
_snake_case : Any = [self.cur_lang_code]
_snake_case : str = [self.eos_token_id]
_snake_case : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
_snake_case : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_snake_case : Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : Any , snake_case : str , snake_case : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_snake_case : Any = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
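# Rough usage sketch: upstream this fast tokenizer is NllbTokenizerFast; the checkpoint id
# comes from the pretrained map above and the example text is made up.
#
# tok = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# inputs = tok("Hello world", return_tensors="pt")  # ids carry the eng_Latn code and </s>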
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2 ** power (Project Euler problem 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =1
_UpperCAmelCase =3
_UpperCAmelCase =(32, 32)
_UpperCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_snake_case )
return image
@property
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_snake_case , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.dummy_cond_unet_upscale
_UpperCAmelCase =DDPMScheduler()
_UpperCAmelCase =DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase =self.dummy_vae
_UpperCAmelCase =self.dummy_text_encoder
_UpperCAmelCase =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase =Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase =StableDiffusionUpscalePipeline(
unet=_snake_case , low_res_scheduler=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , max_noise_level=350 , )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase ="A painting of a squirrel eating a burger"
_UpperCAmelCase =torch.Generator(device=_snake_case ).manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase =output.images
_UpperCAmelCase =torch.Generator(device=_snake_case ).manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=_snake_case , )[0]
_UpperCAmelCase =image[0, -3:, -3:, -1]
_UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_UpperCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCAmelCase =np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.dummy_cond_unet_upscale
_UpperCAmelCase =DDPMScheduler()
_UpperCAmelCase =DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase =self.dummy_vae
_UpperCAmelCase =self.dummy_text_encoder
_UpperCAmelCase =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase =Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase =StableDiffusionUpscalePipeline(
unet=_snake_case , low_res_scheduler=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , max_noise_level=350 , )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase ="A painting of a squirrel eating a burger"
_UpperCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase =output.images
assert image.shape[0] == 2
_UpperCAmelCase =torch.Generator(device=_snake_case ).manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.dummy_cond_unet_upscale
_UpperCAmelCase =DDPMScheduler()
_UpperCAmelCase =DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase =self.dummy_vae
_UpperCAmelCase =self.dummy_text_encoder
_UpperCAmelCase =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase =Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase =unet.half()
_UpperCAmelCase =text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase =StableDiffusionUpscalePipeline(
unet=_snake_case , low_res_scheduler=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , max_noise_level=350 , )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase ="A painting of a squirrel eating a burger"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCAmelCase ="stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_UpperCAmelCase ="a cat sitting on a park bench"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(
prompt=_snake_case , image=_snake_case , generator=_snake_case , output_type="np" , )
_UpperCAmelCase =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCAmelCase ="stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
_snake_case , torch_dtype=torch.floataa , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_UpperCAmelCase ="a cat sitting on a park bench"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(
prompt=_snake_case , image=_snake_case , generator=_snake_case , output_type="np" , )
_UpperCAmelCase =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def SCREAMING_SNAKE_CASE ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase ="stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
_snake_case , torch_dtype=torch.floataa , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase ="a cat sitting on a park bench"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(
prompt=_snake_case , image=_snake_case , generator=_snake_case , num_inference_steps=5 , output_type="np" , )
_UpperCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
__lowerCamelCase = """bert-base-cased"""
__lowerCamelCase = """google/pegasus-xsum"""
__lowerCamelCase = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
__lowerCamelCase = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
__lowerCamelCase = """patrickvonplaten/t5-tiny-random"""
__lowerCamelCase = """sshleifer/bart-tiny-random"""
__lowerCamelCase = """sshleifer/tiny-mbart"""
__lowerCamelCase = """sshleifer/tiny-marian-en-de"""
def UpperCAmelCase__ ( __snake_case , __snake_case ) -> Optional[int]:
_A = '''\n'''.join(__snake_case )
Path(__snake_case ).open('''w''' ).writelines(__snake_case )
def UpperCAmelCase__ ( __snake_case ) -> str:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__snake_case , F'''{split}.source''' ) , __snake_case )
_dump_articles(os.path.join(__snake_case , F'''{split}.target''' ) , __snake_case )
return tmp_dir
class _snake_case ( lowerCamelCase ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowercase_ ( self , a ) -> Optional[int]:
"""simple docstring"""
_A = AutoTokenizer.from_pretrained(a )
_A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_A = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
_A = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
_A = 4
_A = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_A , _A = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
_A = SeqaSeqDataset(
a , data_dir=a , type_path='''train''' , max_source_length=a , max_target_length=a , src_lang=a , tgt_lang=a , )
_A = DataLoader(a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(a , a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_A = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowercase_ ( self , a ) -> List[str]:
"""simple docstring"""
_A = AutoTokenizer.from_pretrained(a )
_A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_A = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
_A = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
_A = 4
_A = LegacySeqaSeqDataset(
a , data_dir=a , type_path='''train''' , max_source_length=2_0 , max_target_length=a , )
_A = DataLoader(a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
_A = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
_A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_A = tmp_dir.joinpath('''train.source''' ).open().readlines()
_A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(a , a , 1_2_8 , a )
_A = {x.name for x in tmp_dir.iterdir()}
_A = {x.name for x in save_dir.iterdir()}
_A = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(a ) < len(a )
assert len(a ) == 1
assert len(packed_examples[0] ) == sum(len(a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
_A , _A , _A = self._get_dataset(max_len=6_4 )
_A = 6_4
_A = ds.make_dynamic_sampler(a , required_batch_size_multiple=a )
_A = [len(a ) for x in batch_sampler]
assert len(set(a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(a ) == len(a ) # no dropped or added examples
_A = DataLoader(a , batch_sampler=a , collate_fn=ds.collate_fn , num_workers=2 )
_A = []
_A = []
for batch in data_loader:
_A = batch['''input_ids'''].shape
_A = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_A = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(a )
assert num_src_per_batch[0] == max(a )
if failures:
raise AssertionError(f'''too many tokens in {len(a )} batches''' )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_A , _A , _A = self._get_dataset(max_len=5_1_2 )
_A = 2
_A = ds.make_sortish_sampler(a , shuffle=a )
_A = DataLoader(a , batch_size=a , collate_fn=ds.collate_fn , num_workers=2 )
_A = DataLoader(a , batch_size=a , collate_fn=ds.collate_fn , num_workers=2 , sampler=a )
_A = tokenizer.pad_token_id
def count_pad_tokens(a , a="input_ids" ):
return [batch[k].eq(a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(a , k='''labels''' ) ) < sum(count_pad_tokens(a , k='''labels''' ) )
assert sum(count_pad_tokens(a ) ) < sum(count_pad_tokens(a ) )
assert len(a ) == len(a )
def lowercase_ ( self , a=1_0_0_0 , a=1_2_8 ) -> List[Any]:
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , a ):
_A = '''examples/seq2seq/wmt_en_ro'''
_A = max_len * 2 * 6_4
if not Path(a ).joinpath('''train.len''' ).exists():
save_len_file(a , a )
else:
_A = '''examples/seq2seq/test_data/wmt_en_ro'''
_A = max_len * 4
save_len_file(a , a )
_A = AutoTokenizer.from_pretrained(a )
_A = SeqaSeqDataset(
a , data_dir=a , type_path='''train''' , max_source_length=a , max_target_length=a , n_obs=a , )
return ds, max_tokens, tokenizer
def lowercase_ ( self ) -> str:
"""simple docstring"""
_A , _A , _A = self._get_dataset()
_A = set(DistributedSortishSampler(a , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=a ) )
_A = set(DistributedSortishSampler(a , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=a ) )
assert idsa.intersection(a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowercase_ ( self , a ) -> Union[str, Any]:
"""simple docstring"""
_A = AutoTokenizer.from_pretrained(a , use_fast=a )
if tok_name == MBART_TINY:
_A = SeqaSeqDataset(
a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
_A = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_A = SeqaSeqDataset(
a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
_A = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
        assert len(kwargs ) == 1 if tok_name == BART_TINY else len(kwargs ) == 0
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__lowerCamelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _snake_case ( lowerCamelCase ):
"""simple docstring"""
lowerCamelCase_ = field(default=lowerCamelCase ,metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase_ = field(
default=lowerCamelCase ,metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase_ = field(
default=lowerCamelCase ,metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} ,)
lowerCamelCase_ = field(
default=lowerCamelCase ,metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} ,)
lowerCamelCase_ = field(
default=lowerCamelCase ,metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} ,)
def lowercase_ ( self ) -> str:
"""simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
'''simple docstring'''
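# The function below appears to solve a Project Euler style counting problem: for every n < limit it
# counts arithmetic progressions x > y > z > 0 (y = first_term, step d = common_difference) with
# x**2 - y**2 - z**2 == n, which reduces to n == y * (4*d - y); it then reports how many n admit
# exactly ten such progressions. Variable names are reconstructed from their usages below.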
def solution( limit = 100_0000 ):
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ): # since x, y, z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase : Tuple = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
def or_gate ( input_1 , input_2 ):
    """simple docstring"""
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate ( ):
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__lowerCAmelCase : int = logging.getLogger(__name__)
@dataclass
class A :
a_ = 42
a_ = 42
a_ = 42
@dataclass
class A :
a_ = 42
a_ = 42
a_ = None
a_ = None
class A ( UpperCAmelCase ):
a_ = '''train'''
a_ = '''dev'''
a_ = '''test'''
class A :
@staticmethod
def snake_case__ ( __a : List[Any] , __a : Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def snake_case__ ( __a : str ) -> List[str]:
raise NotImplementedError
@staticmethod
def snake_case__ ( __a : List[InputExample] , __a : List[str] , __a : int , __a : PreTrainedTokenizer , __a : Dict=False , __a : int="[CLS]" , __a : Dict=1 , __a : Tuple="[SEP]" , __a : Any=False , __a : Union[str, Any]=False , __a : Any=0 , __a : Optional[int]=0 , __a : Tuple=-1_0_0 , __a : Optional[Any]=0 , __a : int=True , ) -> List[InputFeatures]:
__UpperCAmelCase = {label: i for i, label in enumerate(__a )}
__UpperCAmelCase = []
for ex_index, example in enumerate(__a ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d of %d''' , __a , len(__a ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
for word, label in zip(example.words , example.labels ):
__UpperCAmelCase = tokenizer.tokenize(__a )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(__a ) > 0:
tokens.extend(__a )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__a ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__UpperCAmelCase = tokenizer.num_special_tokens_to_add()
if len(__a ) > max_seq_length - special_tokens_count:
__UpperCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
__UpperCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__UpperCAmelCase = [sequence_a_segment_id] * len(__a )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__UpperCAmelCase = [cls_token] + tokens
__UpperCAmelCase = [pad_token_label_id] + label_ids
__UpperCAmelCase = [cls_token_segment_id] + segment_ids
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(__a )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__UpperCAmelCase = [1 if mask_padding_with_zero else 0] * len(__a )
# Zero-pad up to the sequence length.
__UpperCAmelCase = max_seq_length - len(__a )
if pad_on_left:
__UpperCAmelCase = ([pad_token] * padding_length) + input_ids
__UpperCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__UpperCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__UpperCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__a ) == max_seq_length
assert len(__a ) == max_seq_length
assert len(__a ) == max_seq_length
assert len(__a ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(__a ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(__a ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(__a ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(__a ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(__a ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__UpperCAmelCase = None
features.append(
InputFeatures(
input_ids=__a , attention_mask=__a , token_type_ids=__a , label_ids=__a ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A ( UpperCAmelCase ):
a_ = 42
a_ = nn.CrossEntropyLoss().ignore_index
def __init__( self : List[Any] , __a : TokenClassificationTask , __a : str , __a : PreTrainedTokenizer , __a : List[str] , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : Split = Split.train , ) -> Optional[int]:
# Load data features from cache or dataset file
__UpperCAmelCase = os.path.join(
__a , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__a ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__UpperCAmelCase = cached_features_file + '''.lock'''
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__UpperCAmelCase = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__UpperCAmelCase = token_classification_task.read_examples_from_file(__a , __a )
# TODO clean up all this to leverage built-in features of tokenizers
__UpperCAmelCase = token_classification_task.convert_examples_to_features(
__a , __a , __a , __a , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__a , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , __a )
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.features )
def __getitem__( self : int , __a : int ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A :
a_ = 42
a_ = -1_0_0
def __init__( self : Union[str, Any] , __a : TokenClassificationTask , __a : str , __a : PreTrainedTokenizer , __a : List[str] , __a : str , __a : Optional[int] = None , __a : Any=False , __a : Split = Split.train , ) -> Union[str, Any]:
__UpperCAmelCase = token_classification_task.read_examples_from_file(__a , __a )
# TODO clean up all this to leverage built-in features of tokenizers
__UpperCAmelCase = token_classification_task.convert_examples_to_features(
__a , __a , __a , __a , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__a , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__UpperCAmelCase = tf.data.Dataset.from_generator(
__a , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__UpperCAmelCase = tf.data.Dataset.from_generator(
__a , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : int ) -> str:
return len(self.features )
def __getitem__( self : int , __a : List[Any] ) -> InputFeatures:
return self.features[i]
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
__A : int = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__A : Union[str, Any] = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
__A : List[Any] = F"{src_lang}-{tgt_lang}"
__A : Union[str, Any] = F"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=_lowercase , exist_ok=_lowercase )
__A : Tuple = os.path.join(_lowercase , "README.md" )
print(F"Generating {path}" )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write(_lowercase )
# make sure we are under the root of the project
UpperCamelCase = Path(__file__).resolve().parent.parent.parent
UpperCamelCase = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=[0, 1, 2, 3] , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=[1, 384, 24, 24] , __UpperCAmelCase=True , __UpperCAmelCase=None , ):
__A : Dict = parent
__A : Union[str, Any] = batch_size
__A : str = image_size
__A : Optional[Any] = patch_size
__A : str = num_channels
__A : str = is_training
__A : Optional[Any] = use_labels
__A : Union[str, Any] = hidden_size
__A : int = num_hidden_layers
__A : List[Any] = backbone_out_indices
__A : Dict = num_attention_heads
__A : Dict = intermediate_size
__A : Tuple = hidden_act
__A : List[str] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : int = initializer_range
__A : List[str] = num_labels
__A : str = backbone_featmap_shape
__A : int = scope
__A : Dict = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__A : Optional[Any] = (image_size // patch_size) ** 2
__A : List[Any] = num_patches + 1
def __UpperCAmelCase( self ):
__A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Tuple = None
if self.use_labels:
__A : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase( self ):
__A : Any = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__A : Tuple = DPTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__A : int = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__A : Dict = self.num_labels
__A : Optional[int] = DPTForDepthEstimation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__A : Dict = model(__UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__A : int = self.num_labels
__A : List[Any] = DPTForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__A : Union[str, Any] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCAmelCase( self ):
__A : int = self.prepare_config_and_inputs()
__A , __A , __A : Dict = config_and_inputs
__A : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase_ : Dict = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase_ : Tuple = False
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : Optional[Any] = False
def __UpperCAmelCase( self ):
__A : str = DPTModelTester(self )
__A : Dict = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def __UpperCAmelCase( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def __UpperCAmelCase( self ):
pass
def __UpperCAmelCase( self ):
__A , __A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def __UpperCAmelCase( self ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(__UpperCAmelCase )
__A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Any = [*signature.parameters.keys()]
__A : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def __UpperCAmelCase( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : int = True
if model_class in get_values(__UpperCAmelCase ):
continue
__A : Dict = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__A : str = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__A : Optional[Any] = model(**__UpperCAmelCase ).loss
loss.backward()
def __UpperCAmelCase( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = False
__A : Optional[int] = True
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
__A : Tuple = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
__A : Any = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__A : List[Any] = model(**__UpperCAmelCase ).loss
loss.backward()
def __UpperCAmelCase( self ):
__A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = _config_zero_init(__UpperCAmelCase )
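        # With all initializer ranges zeroed out, freshly initialized parameters should be exactly 0.0 or
        # 1.0; parameters belonging to the pretrained hybrid backbone are collected and skipped below.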
for model_class in self.all_model_classes:
__A : List[Any] = model_class(config=__UpperCAmelCase )
# Skip the check for the backbone
__A : List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__A : Any = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase( self ):
pass
@slow
def __UpperCAmelCase( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__A : Tuple = DPTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __UpperCAmelCase( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__A , __A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = "add"
with self.assertRaises(__UpperCAmelCase ):
__A : int = DPTForDepthEstimation(__UpperCAmelCase )
def lowerCamelCase_ ( ) -> Optional[Any]:
__A : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase( self ):
__A : List[str] = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__A : List[Any] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__UpperCAmelCase )
__A : Dict = prepare_img()
__A : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__A : List[str] = model(**__UpperCAmelCase )
__A : str = outputs.predicted_depth
# verify the predicted depth
__A : Optional[int] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCAmelCase )
__A : Any = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCAmelCase , atol=1e-4 ) )
'''simple docstring'''
from collections.abc import Sequence
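# Maximum-subarray sum via a single Kadane-style pass: O(n) time, O(1) extra space.
# Illustrative example: max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6 (the slice [4, -1, 2, 1]).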
def max_subsequence_sum ( nums : Sequence[int] | None = None ) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError('''Input sequence should not be empty''' )
    ans = nums[0]
    for i in range(1, len(nums ) ):
        num = nums[i]
        ans = max(ans, ans + num, num )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
'''simple docstring'''
def power ( base , exponent ):
    return base * power(base , (exponent - 1) ) if exponent else 1
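# Illustrative trace: power(3, 4) -> 3 * power(3, 3) -> ... -> 3 * 3 * 3 * 3 * 1 == 81. The recursion
# bottoms out when `exponent` reaches 0, so it assumes a non-negative exponent; the caller below maps
# negative exponents to 1 / power(base, abs(exponent)).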
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0: # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F"""{base} to the power of {exponent} is {result}""")
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
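# Conversion flow: build a BigBirdConfig from the JSON config file, instantiate either a
# question-answering or a pre-training head, load the TensorFlow checkpoint weights into it,
# and save the result as a PyTorch checkpoint.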
def convert_tf_checkpoint_to_pytorch (tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
    def __init__( self ):
        self.connections = {}
    def add_node ( self , node ):
        self.connections[node] = {}
    def add_transition_probability ( self , node1 , node2 , probability ):
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes ( self ):
        return list(self.connections )
    def transition ( self , node ):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions (start , transitions , steps ):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
    doctest.testmod()
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
_UpperCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(a ) , torch_builtin(a ) ) )
self.assertFalse(torch.allclose(gelu_python(a ) , gelu_new(a ) ) )
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
_UpperCamelCase = get_activation("""gelu""" )
_UpperCamelCase = get_activation("""gelu_10""" )
_UpperCamelCase = torch_builtin(a )
_UpperCamelCase = geluaa(a )
_UpperCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(a ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def A_ ( self ) -> Any:
'''simple docstring'''
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(a ):
get_activation("""bogus""" )
with self.assertRaises(a ):
get_activation(a )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = get_activation("""gelu""" )
_UpperCamelCase = 1
_UpperCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(a ):
_UpperCamelCase = acta.a
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : Union[str, Any] = "pix2struct_text_model"
UpperCamelCase_ : str = ["past_key_values"]
UpperCamelCase_ : str = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a=5_02_44 , a=7_68 , a=64 , a=20_48 , a=12 , a=12 , a=32 , a=1_28 , a=0.1 , a=1e-6 , a=1.0 , a="gelu_new" , a=0 , a=False , a=0 , a=1 , a=False , a=True , **a , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = d_kv
_UpperCamelCase = d_ff
_UpperCamelCase = num_layers
_UpperCamelCase = num_heads
_UpperCamelCase = relative_attention_num_buckets
_UpperCamelCase = relative_attention_max_distance
_UpperCamelCase = dropout_rate
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_factor
_UpperCamelCase = use_cache
_UpperCamelCase = eos_token_id
_UpperCamelCase = decoder_start_token_id
# for backwards compatibility
_UpperCamelCase = dense_act_fn
super().__init__(
pad_token_id=a , eos_token_id=a , decoder_start_token_id=a , tie_word_embeddings=a , is_decoder=a , **a , )
@classmethod
def A_ ( cls , a , **a ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(a , **a )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_UpperCamelCase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : int = "pix2struct_vision_model"
def __init__( self , a=7_68 , a=7_68 , a=20_48 , a=64 , a=12 , a=12 , a="gelu_new" , a=1e-6 , a=0.0 , a=0.0 , a=1e-10 , a=1.0 , a=40_96 , a=32 , a=1_28 , **a , ) -> Tuple:
'''simple docstring'''
super().__init__(**a )
_UpperCamelCase = hidden_size
_UpperCamelCase = patch_embed_hidden_size
_UpperCamelCase = d_ff
_UpperCamelCase = dropout_rate
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = initializer_range
_UpperCamelCase = initializer_factor
_UpperCamelCase = attention_dropout
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = dense_act_fn
_UpperCamelCase = seq_len
_UpperCamelCase = relative_attention_num_buckets
_UpperCamelCase = relative_attention_max_distance
_UpperCamelCase = d_kv
@classmethod
def A_ ( cls , a , **a ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_UpperCamelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : Dict = "pix2struct"
UpperCamelCase_ : int = True
def __init__( self , a=None , a=None , a=1.0 , a=0.02 , a=False , a=False , a=True , **a , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(tie_word_embeddings=a , is_encoder_decoder=a , **a )
if text_config is None:
_UpperCamelCase = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
_UpperCamelCase = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
_UpperCamelCase = PixaStructTextConfig(**a )
_UpperCamelCase = PixaStructVisionConfig(**a )
_UpperCamelCase = self.text_config.decoder_start_token_id
_UpperCamelCase = self.text_config.pad_token_id
_UpperCamelCase = self.text_config.eos_token_id
_UpperCamelCase = initializer_factor
_UpperCamelCase = initializer_range
_UpperCamelCase = self.initializer_range
_UpperCamelCase = self.initializer_range
_UpperCamelCase = is_vqa
@classmethod
def A_ ( cls , a , a , **a ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.text_config.to_dict()
_UpperCamelCase = self.vision_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__magic_name__ = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
__magic_name__ = {
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
__magic_name__ = '''▁'''
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self , a_ , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_ = None , **a_ , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
lowerCamelCase_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCamelCase_ : List[Any] = vocab_file
lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a_ ) )
lowerCamelCase_ : Union[str, Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
lowerCamelCase_ : List[Any] = len(self.sp_model ) - 1
lowerCamelCase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _UpperCamelCase ( self , a_ , a_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
lowerCamelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : List[Any] = [self.sep_token_id]
lowerCamelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _UpperCamelCase ( self ):
return len(self.sp_model )
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , a_ ):
return self.sp_model.encode(a_ , out_type=a_ )
def _UpperCamelCase ( self , a_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ : List[Any] = self.sp_model.PieceToId(a_ )
return spm_id if spm_id else self.unk_token_id
def _UpperCamelCase ( self , a_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(a_ )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Any = []
lowerCamelCase_ : str = ""
lowerCamelCase_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a_ ) + token
lowerCamelCase_ : Dict = True
lowerCamelCase_ : Any = []
else:
current_sub_tokens.append(a_ )
lowerCamelCase_ : Optional[int] = False
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def __getstate__( self ):
lowerCamelCase_ : Union[str, Any] = self.__dict__.copy()
lowerCamelCase_ : Dict = None
return state
def __setstate__( self , a_ ):
lowerCamelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : List[str] = {}
lowerCamelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self , a_ , a_ = None ):
if not os.path.isdir(a_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Optional[int] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
lowerCamelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
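# Inputs for generate_mask: a random RGB image together with source and target prompts.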
lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : int = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
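# Inputs for invert: a random RGB image and a prompt, with decode_latents enabled.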
lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Union[str, Any] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self ):
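# Verify that optional components set to None survive a save/load round trip and that outputs stay unchanged.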
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : int = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
lowerCamelCase_ : int = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
self.assertLess(a_ , 1E-4 )
def _UpperCamelCase ( self ):
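# Run generate_mask on dummy inputs and check the resulting mask shape and values.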
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
lowerCamelCase_ : int = pipe.generate_mask(**a_ )
lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase_ : List[str] = np.array([0] * 9 )
lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _UpperCamelCase ( self ):
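# Run DDIM inversion on dummy inputs and compare an image slice against reference values.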
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
lowerCamelCase_ : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Dict = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
def _UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _UpperCamelCase ( self ):
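# Repeat the inversion check with the multistep DPM-Solver scheduler pair.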
lowerCamelCase_ : List[Any] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : str = pipe.invert(**a_ ).images
lowerCamelCase_ : int = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _UpperCamelCase ( cls ):
lowerCamelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
lowerCamelCase_ : List[Any] = raw_image
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : str = "a bowl of fruit"
lowerCamelCase_ : Optional[int] = "a bowl of pears"
lowerCamelCase_ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
lowerCamelCase_ : List[str] = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = "a bowl of fruit"
lowerCamelCase_ : Dict = "a bowl of pears"
lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
lowerCamelCase_ : Any = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCamelCase__ : str = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class _snake_case ( UpperCAmelCase_ ):
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : Union[str, Any] = {}
lowercase__ : Optional[Any] = {}
if prompt is not None:
lowercase__ : Tuple = prompt
if generate_kwargs is not None:
lowercase__ : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase__ : int = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""")
lowercase__ : Tuple = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
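# Load the image and build model inputs; conditional prompts are handled per model type (vision-encoder-decoder does not support them).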
lowercase__ : Any = load_image(SCREAMING_SNAKE_CASE_)
if prompt is not None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
raise ValueError(
f'Received an invalid text input, got - {type(SCREAMING_SNAKE_CASE_)} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""")
lowercase__ : Optional[Any] = self.model.config.model_type
if model_type == "git":
lowercase__ : Tuple = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework)
lowercase__ : str = self.tokenizer(text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_).input_ids
lowercase__ : Tuple = [self.tokenizer.cls_token_id] + input_ids
lowercase__ : Dict = torch.tensor(SCREAMING_SNAKE_CASE_).unsqueeze(0)
model_inputs.update({"""input_ids""": input_ids})
elif model_type == "pix2struct":
lowercase__ : int = self.image_processor(images=SCREAMING_SNAKE_CASE_ , header_text=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase__ : str = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework)
lowercase__ : Optional[int] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework)
model_inputs.update(SCREAMING_SNAKE_CASE_)
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation')
else:
lowercase__ : Union[str, Any] = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
lowercase__ : Union[str, Any] = None
return model_inputs
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , SCREAMING_SNAKE_CASE_)
and all(x is None for x in model_inputs["""input_ids"""])
):
lowercase__ : Optional[Any] = None
if generate_kwargs is None:
lowercase__ : Any = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase__ : Union[str, Any] = model_inputs.pop(self.model.main_input_name)
lowercase__ : Optional[Any] = self.model.generate(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
return model_outputs
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
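# Decode each generated token sequence into a generated_text record.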
lowercase__ : Optional[Any] = []
for output_ids in model_outputs:
lowercase__ : int = {
"""generated_text""": self.tokenizer.decode(
SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , )
}
records.append(SCREAMING_SNAKE_CASE_)
return records
| 12 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
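# Derive Swin2SR hyper-parameters (upscale factor, embed dim, upsampler type) from the checkpoint URL.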
lowercase__ : Optional[Any] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase__ : List[str] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowercase__ : Optional[int] = 4
lowercase__ : Optional[Any] = 48
lowercase__ : int = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase__ : List[str] = [6, 6, 6, 6]
lowercase__ : Any = 60
lowercase__ : Tuple = [6, 6, 6, 6]
lowercase__ : Dict = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase__ : Tuple = 4
lowercase__ : Any = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowercase__ : str = 1
lowercase__ : Optional[int] = 1
lowercase__ : Optional[int] = 1_26
lowercase__ : Any = 7
lowercase__ : int = 255.0
lowercase__ : List[Any] = """"""
return config
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
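# Map parameter names from the original Swin2SR repository to the Hugging Face naming scheme.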
if "patch_embed.proj" in name and "layers" not in name:
lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
lowercase__ : Union[str, Any] = """layernorm.weight"""
if name == "norm.bias":
lowercase__ : List[str] = """layernorm.bias"""
if "conv_first" in name:
lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowercase__ : Optional[int] = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" )
lowercase__ : List[str] = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
lowercase__ : str = """swin2sr.""" + name
return name
def UpperCamelCase ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
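# Rename keys and split fused qkv projections into separate query, key and value tensors.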
for key in orig_state_dict.copy().keys():
lowercase__ : str = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
lowercase__ : Any = key.split(""".""" )
lowercase__ : List[Any] = int(key_split[1] )
lowercase__ : Dict = int(key_split[4] )
lowercase__ : Optional[Any] = config.embed_dim
if "weight" in key:
lowercase__ : List[str] = val[:dim, :]
lowercase__ : List[str] = val[dim : dim * 2, :]
lowercase__ : Optional[Any] = val[-dim:, :]
else:
lowercase__ : Optional[Any] = val[:dim]
lowercase__ : List[Any] = val[dim : dim * 2]
lowercase__ : Optional[int] = val[-dim:]
pass
else:
lowercase__ : Optional[Any] = val
return orig_state_dict
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
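# Build the HF model, convert and load the original weights, then verify outputs on a sample image before saving or pushing.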
lowercase__ : Dict = get_config(lowercase_ )
lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ )
model.eval()
lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" )
lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ )
lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" )
lowercase__ : Any = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56
lowercase__ : Union[str, Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 )
if config.num_channels == 1:
lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
lowercase__ : Union[str, Any] = model(lowercase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] )
lowercase__ : Optional[Any] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowercase__ : Optional[Any] = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] )
lowercase__ : int = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] )
lowercase__ : int = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
lowercase__ : str = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
lowercase__ : str = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 12 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 'codegen'
_a = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[Any] , lowerCAmelCase : int=5_0400 , lowerCAmelCase : Any=2048 , lowerCAmelCase : Optional[Any]=2048 , lowerCAmelCase : Dict=4096 , lowerCAmelCase : Optional[int]=28 , lowerCAmelCase : Tuple=16 , lowerCAmelCase : Union[str, Any]=64 , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Optional[Any]="gelu_new" , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : Any=0.0 , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : Any=1e-5 , lowerCAmelCase : Optional[int]=0.02 , lowerCAmelCase : List[str]=True , lowerCAmelCase : Any=5_0256 , lowerCAmelCase : Optional[Any]=5_0256 , lowerCAmelCase : List[str]=False , **lowerCAmelCase : int , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = n_ctx
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_inner
lowerCAmelCase = rotary_dim
lowerCAmelCase = activation_function
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = use_cache
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , **lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _a ):
def __init__( self : Any , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : str = "default" , lowerCAmelCase : List[PatchingSpec] = None , lowerCAmelCase : bool = False , ):
super().__init__(lowerCAmelCase , task=lowerCAmelCase , patching_specs=lowerCAmelCase , use_past=lowerCAmelCase )
if not getattr(self._config , """pad_token_id""" , lowerCAmelCase ):
# TODO: how to do that better?
lowerCAmelCase = 0
@property
def __lowercase ( self : List[str] ):
lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction="""inputs""" )
lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __lowercase ( self : Union[str, Any] ):
return self._config.n_layer
@property
def __lowercase ( self : List[Any] ):
return self._config.n_head
def __lowercase ( self : int , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase = super(lowerCAmelCase , self ).generate_dummy_inputs(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
# We need to order the inputs in the way they appear in the forward()
lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase , lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase = seqlen + 2
lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase = [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def __lowercase ( self : Optional[Any] ):
return 13
| 529 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a = 1_6
a = 3_2
def lowercase (snake_case__ : Accelerator , snake_case__ : int = 16 ) -> Dict:
'''simple docstring'''
lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase = 8
else:
lowerCAmelCase = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
lowerCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a = mocked_dataloaders # noqa: F811
def lowercase (snake_case__ : int , snake_case__ : Tuple ) -> int:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
lowerCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase = config["""lr"""]
lowerCAmelCase = int(config["""num_epochs"""] )
lowerCAmelCase = int(config["""seed"""] )
lowerCAmelCase = int(config["""batch_size"""] )
set_seed(snake_case__ )
lowerCAmelCase , lowerCAmelCase = get_dataloaders(snake_case__ , snake_case__ )
lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCAmelCase = os.path.split(snake_case__ )[-1].split(""".""" )[0]
accelerator.init_trackers(snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCAmelCase = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = outputs.logits.argmax(dim=-1 )
lowerCAmelCase , lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(snake_case__ ),
"""epoch""": epoch,
} , step=snake_case__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowercase () -> str:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=snake_case__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 529 | 1 |