from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    """Configuration class for PEGASUS models."""

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
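
# A minimal usage sketch for the configuration class above; the overridden
# sizes are illustrative values, not the pegasus-large defaults.
config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
print(config.hidden_size)          # 512 (aliased to d_model via the property)
print(config.num_attention_heads)  # 16 (aliased to encoder_attention_heads)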
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowercase__ : int = None
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : List[str] = '''▁'''
lowercase__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : str = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
lowercase__ : List[Any] = {
'''google/pegasus-xsum''': 5_12,
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
_lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Tuple = PegasusTokenizer
_lowerCAmelCase : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , lowercase_ : Optional[Any]=None , lowercase_ : int=None , lowercase_ : Tuple="<pad>" , lowercase_ : int="</s>" , lowercase_ : Tuple="<unk>" , lowercase_ : str="<mask_2>" , lowercase_ : Optional[Any]="<mask_1>" , lowercase_ : str=None , lowercase_ : List[str]=103 , **lowercase_ : List[Any] , ):
snake_case_ : Dict = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f"additional_special_tokens should be of type {type(lowercase_ )}, but is"
f" {type(lowercase_ )}" )
snake_case_ : str = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
snake_case_ : Union[str, Any] = additional_special_tokens_extended
else:
snake_case_ : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
snake_case_ : List[Any] = vocab_file
snake_case_ : List[Any] = False if not self.vocab_file else True
def _snake_case ( self : str , lowercase_ : Union[str, Any] ):
snake_case_ : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self : int , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : str=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ : Dict = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
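
# A minimal usage sketch, assuming the google/pegasus-xsum checkpoint is
# reachable on the Hub; from_pretrained comes from the PreTrainedTokenizerFast
# base class.
tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
batch = tokenizer(["PEGASUS was pre-trained with gap-sentence generation."])
print(batch["input_ids"][0][-1] == tokenizer.eos_token_id)  # True: EOS is appended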
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to max_images Google Image results for the query; returns the count."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }

    html = requests.get('''https://www.google.com/search''', params=params, headers=headers)
    soup = BeautifulSoup(html.text, '''html.parser''')
    matched_images_data = ''''''.join(
        re.findall(r'''AF_initDataCallback\(([^<]+)\);''', str(soup.select('''script'''))))

    # round-trip through JSON to unescape the embedded payload
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''',
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''',
        '''''',
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''',
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, '''ascii''').decode(
            '''unicode-escape''')
        original_size_img = bytes(original_size_img_not_fixed, '''ascii''').decode(
            '''unicode-escape''')
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f'query_{query.replace(" ", "_")}'
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f'{path_name}/original_size_img_{index}.jpg')
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print('''Please provide a search term.''')
        raise
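
# A short programmatic usage sketch: this performs live HTTP requests and only
# works while Google's result markup still matches the regexes above; "sunset"
# is an arbitrary example query.
count = download_images_from_google_query("sunset", max_images=3)
print(f"downloaded {count} images to ./query_sunset/")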
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node  # Euler circuit
    if odd_degree_nodes == 2:
        return 2, odd_node  # Euler path, starting at an odd-degree vertex
    return 3, odd_node  # neither


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('''graph is not Eulerian''')
        print('''no path''')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('''graph has a Euler path''')
    if check == 1:
        print('''graph has a Euler cycle''')
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
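
# A worked illustration of the helper's return convention: check_circuit_or_path
# returns 1 for an Euler circuit (no odd-degree vertices), 2 for an Euler path
# (exactly two odd-degree vertices, returning one of them as the start vertex),
# and 3 otherwise. The expected values below follow from the vertex degrees.
g = {1: [2, 3], 2: [1, 3], 3: [1, 2]}          # every vertex has degree 2
print(check_circuit_or_path(g, 10))             # (1, -1): Euler circuit exists
g = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
print(check_circuit_or_path(g, 10))             # (2, 5): Euler path, odd vertices 1 and 5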
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase = 16
__lowerCamelCase = 32
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = 16 ):
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained('bert-base-cased' )
A__ = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
UpperCamelCase__ , padding='longest' , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
A__ = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase = mocked_dataloaders # noqa: F811
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCamelCase__ ) == "1":
A__ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config['num_epochs'] )
A__ = int(config['seed'] )
A__ = int(config['batch_size'] )
set_seed(UpperCamelCase__ )
A__ , A__ = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
A__ = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
A__ = os.path.split(UpperCamelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
A__ = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**UpperCamelCase__ )
A__ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
A__ = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCamelCase__ )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(UpperCamelCase__ ),
'epoch': epoch,
} , step=UpperCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=UpperCamelCase__ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
A__ = parser.parse_args()
A__ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
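
# A condensed sketch of the tracking-specific calls used above, assuming the
# script is launched with --with_tracking and a tracker backend (for example
# TensorBoard) is installed; the logged values are illustrative.
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
accelerator.init_trackers("mrpc_example", {"lr": 2e-5, "batch_size": 16})
accelerator.log({"train_loss": 0.42, "epoch": 0}, step=0)
accelerator.end_training()  # closes all open trackers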
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = AlbertConfig.from_json_file(UpperCamelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
A__ = AlbertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
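
# A minimal invocation sketch with hypothetical file paths; the script is
# normally run from the command line with the arguments defined above.
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin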
'''
Kosaraju's algorithm for finding the strongly connected components of a directed graph:
one depth-first pass records vertices in post-order, a second pass over the reversed
graph, taken in reverse post-order, collects one component per tree.
'''

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    '''Depth-first search recording vertices in order of completion (post-order).'''
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    '''Depth-first search on the reversed graph collecting one strongly connected component.'''
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    '''Return the list of strongly connected components of the given directed graph.'''
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
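
# Usage on the sample graphs defined above; the expected results follow by
# tracing the two DFS passes (vertex order within a component depends on the
# traversal order).
print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]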
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    '''A BERT-style configuration with additional parameters for pruning/masking.'''

    model_type = 'masked_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
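
# A minimal usage sketch: pruning_method, mask_init and mask_scale are the
# masking-specific knobs on top of the usual BERT hyper-parameters.
config = MaskedBertConfig(num_hidden_layers=6, pruning_method="topK", mask_scale=0.0)
print(config.model_type, config.pruning_method)  # masked_bert topK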
def reverse_long_words(sentence: str) -> str:
    '''
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words('Hey wollef sroirraw')
    'Hey fellow warriors'
    '''
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
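
# One more worked call: only words longer than four characters are reversed.
print(reverse_long_words("Hello world hi"))  # 'olleH dlrow hi'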
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''input_features''', '''is_longer''']
def __init__( self , lowerCamelCase__=64 , lowerCamelCase__=48_000 , lowerCamelCase__=480 , lowerCamelCase__=10 , lowerCamelCase__=1_024 , lowerCamelCase__=0.0 , lowerCamelCase__=False , lowerCamelCase__ = 0 , lowerCamelCase__ = 14_000 , lowerCamelCase__ = None , lowerCamelCase__ = "fusion" , lowerCamelCase__ = "repeatpad" , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
__lowerCamelCase = top_db
__lowerCamelCase = truncation
__lowerCamelCase = padding
__lowerCamelCase = fft_window_size
__lowerCamelCase = (fft_window_size >> 1) + 1
__lowerCamelCase = hop_length
__lowerCamelCase = max_length_s
__lowerCamelCase = max_length_s * sampling_rate
__lowerCamelCase = sampling_rate
__lowerCamelCase = frequency_min
__lowerCamelCase = frequency_max
__lowerCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm=lowerCamelCase__ , mel_scale='htk' , )
__lowerCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm='slaney' , mel_scale='slaney' , )
def lowercase_ ( self ) -> Dict[str, Any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = spectrogram(
lowerCamelCase__ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase__ , log_mel='dB' , )
return log_mel_spectrogram.T
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__lowerCamelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__lowerCamelCase = [0]
# randomly choose index for each part
__lowerCamelCase = np.random.choice(ranges[0] )
__lowerCamelCase = np.random.choice(ranges[1] )
__lowerCamelCase = np.random.choice(ranges[2] )
__lowerCamelCase = mel[idx_front : idx_front + chunk_frames, :]
__lowerCamelCase = mel[idx_middle : idx_middle + chunk_frames, :]
__lowerCamelCase = mel[idx_back : idx_back + chunk_frames, :]
__lowerCamelCase = torch.tensor(mel[None, None, :] )
__lowerCamelCase = torch.nn.functional.interpolate(
lowerCamelCase__ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=lowerCamelCase__ )
__lowerCamelCase = mel_shrink[0][0].numpy()
__lowerCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__lowerCamelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__lowerCamelCase = len(lowerCamelCase__ ) - max_length
__lowerCamelCase = np.random.randint(0 , overflow + 1 )
__lowerCamelCase = waveform[idx : idx + max_length]
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
__lowerCamelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__lowerCamelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__lowerCamelCase = np.stack([mel, mel, mel, mel] , axis=0 )
__lowerCamelCase = False
else:
__lowerCamelCase = self._random_mel_fusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
__lowerCamelCase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__lowerCamelCase = int(max_length / len(lowerCamelCase__ ) )
__lowerCamelCase = np.stack(np.tile(lowerCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__lowerCamelCase = int(max_length / len(lowerCamelCase__ ) )
__lowerCamelCase = np.stack(np.tile(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = np.pad(lowerCamelCase__ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
__lowerCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> BatchFeature:
'''simple docstring'''
__lowerCamelCase = truncation if truncation is not None else self.truncation
__lowerCamelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowerCamelCase = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__lowerCamelCase = is_batched_numpy or (
isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowerCamelCase = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ):
__lowerCamelCase = np.asarray(lowerCamelCase__ , dtype=np.floataa )
elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowerCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCamelCase = [np.asarray(lowerCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
__lowerCamelCase = [
self._get_input_mel(lowerCamelCase__ , max_length if max_length else self.nb_max_samples , lowerCamelCase__ , lowerCamelCase__ )
for waveform in raw_speech
]
__lowerCamelCase = []
__lowerCamelCase = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase__ )
is_longer.append(lowerCamelCase__ )
if truncation == "fusion" and sum(lowerCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__lowerCamelCase = np.random.randint(0 , len(lowerCamelCase__ ) )
__lowerCamelCase = True
if isinstance(input_mel[0] , lowerCamelCase__ ):
__lowerCamelCase = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__lowerCamelCase = [[longer] for longer in is_longer]
__lowerCamelCase = {'input_features': input_mel, 'is_longer': is_longer}
__lowerCamelCase = BatchFeature(lowerCamelCase__ )
if return_tensors is not None:
__lowerCamelCase = input_features.convert_to_tensors(lowerCamelCase__ )
return input_features
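
# A minimal usage sketch. This sample mirrors the CLAP feature extractor from
# transformers; assuming that class is importable, a one-second 48 kHz mono
# waveform (random noise here, purely illustrative) is converted to log-mel
# input features using the default "fusion" truncation defined above.
import numpy as np
from transformers import ClapFeatureExtractor

waveform = np.random.randn(48_000).astype(np.float32)
extractor = ClapFeatureExtractor()  # constructed with the defaults shown above
features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape, features["is_longer"])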
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : int , _A : Tuple=13 , _A : List[str]=7 , _A : int=True , _A : Union[str, Any]=True , _A : List[Any]=True , _A : Optional[Any]=True , _A : Optional[Any]=99 , _A : Any=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Union[str, Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : Any=0.1 , _A : List[Any]=512 , _A : List[Any]=16 , _A : List[str]=2 , _A : List[Any]=0.0_2 , _A : List[str]=False , _A : Optional[int]=True , _A : Any="None" , _A : Optional[int]=3 , _A : int=4 , _A : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Dict = use_input_mask
UpperCAmelCase__ : Optional[int] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : Optional[int] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : List[Any] = type_vocab_size
UpperCAmelCase__ : List[Any] = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : List[str] = num_labels
UpperCAmelCase__ : Union[str, Any] = num_choices
UpperCAmelCase__ : str = relative_attention
UpperCAmelCase__ : Union[str, Any] = position_biased_input
UpperCAmelCase__ : Optional[Any] = pos_att_type
UpperCAmelCase__ : List[str] = scope
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase__ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase_ ( self : str , _A : Union[str, Any] ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase_ ( self : str , _A : str , _A : List[Any] , _A : Optional[Any] , _A : Dict , _A : str , _A : Any , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = DebertaVaModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A )[0]
UpperCAmelCase__ : Optional[int] = model(_A , token_type_ids=_A )[0]
UpperCAmelCase__ : List[Any] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase_ ( self : Tuple , _A : str , _A : Optional[Any] , _A : Union[str, Any] , _A : int , _A : Optional[int] , _A : List[str] , _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = DebertaVaForMaskedLM(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , _A : str , _A : int , _A : Tuple , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : List[str] = DebertaVaForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def lowercase_ ( self : Optional[int] , _A : int , _A : int , _A : Union[str, Any] , _A : Any , _A : int , _A : Union[str, Any] , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.num_labels
UpperCAmelCase__ : str = DebertaVaForTokenClassification(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Dict = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Union[str, Any] , _A : List[Any] , _A : Optional[int] , _A : Optional[Any] , _A : Any , _A : List[Any] , _A : Tuple , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = DebertaVaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Optional[Any] , _A : Dict , _A : Any , _A : Tuple , _A : Optional[Any] , _A : Tuple , _A : List[str] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = DebertaVaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : List[str] = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[Any] = config_and_inputs
UpperCAmelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DebertaVaModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Dict = DebertaVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
UpperCAmelCase__ : str = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
UpperCAmelCase__ : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
UpperCAmelCase__ : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
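
# These tests are normally collected by unittest/pytest rather than run
# directly; a sketch with a hypothetical file name and node selection:
# python -m pytest tests/models/deberta_v2/test_modeling_deberta_v2.py -k "deberta_model"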
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
UpperCamelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : int = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase__ : Tuple = value
else:
UpperCAmelCase__ : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Dict = fairseq_model.state_dict()
UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ : Any = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : str = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : List[str] = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Dict = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ : Tuple = '''weight'''
else:
UpperCAmelCase__ : Optional[Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Union[str, Any] = int(items[0] )
UpperCAmelCase__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any:
if config_path is not None:
UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : int = UniSpeechSatConfig()
UpperCAmelCase__ : Tuple = ''''''
if is_finetuned:
UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ )
else:
UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
UpperCAmelCase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCamelCase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
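
# A minimal invocation sketch with hypothetical paths, using the CLI flags
# defined in the argparse block above.
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path ./unispeech_sat.pt \
#     --pytorch_dump_folder_path ./unispeech-sat-base \
#     --not_finetuned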
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """simple docstring"""
    def __init__(self, matrix, w, h) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self) -> str:
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')
    def __sub__(self, other) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height
    def width(self) -> int:
        return self.__width
    def component(self, x, y) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('component: indices out of bounds')
    def change_component(self, x, y, value) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')
    def minor(self, x, y) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x, y) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
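# Hedged usage sketch (not part of the original module): a few concrete values
# exercising the Vector and Matrix classes above; the numbers are illustrative.
if __name__ == "__main__":
    x = Vector([1, 2, 3])
    y = Vector([3, 2, 1])
    print(x + y)  # (4,4,4)
    print(x * 2)  # scalar multiplication: (2,4,6)
    print(x * y)  # dot product: 1*3 + 2*2 + 3*1 = 10
    identity = Matrix([[1, 0], [0, 1]], 2, 2)
    print(identity.determinant())  # 1 for the 2x2 identity
    print(identity * Vector([5, 7]))  # (5,7): the identity leaves the vector unchanged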
| 62 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.')
| 62 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    keys = soup.findAll("""h1""")
    values = soup.findAll("""div""", {"""class""": """maincounter-number"""})
    keys += soup.findAll("""span""", {"""class""": """panel-title"""})
    values += soup.findAll("""div""", {"""class""": """number-table-main"""})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
    for key, value in world_covid19_stats().items():
print(F"""{key}\n{value}\n""")
| 353 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 223 | 0 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter.lower()] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
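# Hedged usage sketch (not in the original file): the ciphertext below is the
# sentence "why is the caesar cipher so popular? it is too easy to crack!"
# shifted forward by 7, so the decoder should recover shift 7.
if __name__ == "__main__":
    best_shift, _, decoded = decrypt_caesar_with_chi_squared(
        "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
    )
    print(best_shift, decoded)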
| 194 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 39 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.022950088605284690, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    """simple docstring"""
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 148 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )
    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
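# Hedged usage sketch (illustrative, assuming this class lives at datasets/io/text.py
# as in the `datasets` library; the file path below is made up):
#
#   from datasets.io.text import TextDatasetReader
#   dataset = TextDatasetReader("path/to/corpus.txt", split="train").read()
#   print(dataset[0]["text"])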
| 148 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
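# With the lazy-module pattern above, the heavy modeling imports are deferred until
# first attribute access; a sketch of the effect (assuming the usual transformers
# package layout):
#
#   from transformers.models.wavlm import WavLMConfig  # cheap: configuration only
#   from transformers.models.wavlm import WavLMModel   # first access triggers the torch-backed import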
| 19 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()
    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 303 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
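# Worked example (illustrative key, not taken from a real checkpoint) of the
# replacements above: '/' -> '.', 'ffn.dense_1.' -> 'fc2.', 'kernel' -> 'weight':
#
#   >>> rename_state_dict_key("model/decoder/ffn/dense_1/kernel")
#   'model.decoder.fc2.weight'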
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """simple docstring"""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    """simple docstring"""
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        model = CLIPVisionModelWithProjection(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_prior(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 94 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels
    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states, )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.', RESNET_START_DOCSTRING, )
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """, RESNET_START_DOCSTRING, )
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __A , )
class UpperCamelCase__( __A , __A ):
def __init__( self ,__UpperCAmelCase ) -> Optional[Any]:
super().__init__(__UpperCAmelCase )
super()._init_backbone(__UpperCAmelCase )
A__ = [config.embedding_size] + config.hidden_sizes
A__ = ResNetEmbeddings(__UpperCAmelCase )
A__ = ResNetEncoder(__UpperCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@replace_return_docstrings(output_type=__UpperCAmelCase ,config_class=_CONFIG_FOR_DOC )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> BackboneOutput:
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = self.embedder(__UpperCAmelCase )
A__ = self.encoder(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase )
A__ = outputs.hidden_states
A__ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A__ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__UpperCAmelCase ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=__UpperCAmelCase ,)
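# Illustrative usage sketch, not part of the modeling code above: assuming the
# backbone class corresponds to `transformers.ResNetBackbone`, per-stage feature
# maps can be requested for a detector head. The checkpoint name and the
# `out_features` values are examples only.
if __name__ == "__main__":
    import torch
    from transformers import ResNetBackbone

    backbone = ResNetBackbone.from_pretrained("microsoft/resnet-50", out_features=["stage2", "stage4"])
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = backbone(pixel_values)
    for feature_map in outputs.feature_maps:  # one (batch, channels, height, width) tensor per requested stage
        print(feature_map.shape)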
| 221 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A : str = get_tests_dir('''fixtures''')
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any]):
# A mock response for an HTTP head request to emulate server down
_A : int = mock.Mock()
_A : List[str] = 500
_A : List[Any] = {}
_A : List[Any] = HTTPError
_A : str = {}
# Download this model to make sure it's in the cache.
_A : Any = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=SCREAMING_SNAKE_CASE) as mock_head:
_A : List[str] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# This check we did call the fake head request
mock_head.assert_called()
def A ( self : Union[str, Any]):
# This test is for deprecated behavior and can be removed in v5
_A : List[Any] = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')
def A ( self : Optional[Any]):
with self.assertRaises(SCREAMING_SNAKE_CASE):
# config is in subfolder, the following should not work without specifying the subfolder
_A : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')
_A : str = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor')
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A ( cls : int):
_A : Union[str, Any] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE)
@classmethod
def A ( cls : str):
try:
delete_repo(token=cls._token , repo_id='test-image-processor')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor')
except HTTPError:
pass
def A ( self : List[str]):
_A : Any = ViTImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE)
image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token)
_A : Optional[int] = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
# Reset repo
delete_repo(token=self._token , repo_id='test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
SCREAMING_SNAKE_CASE , repo_id='test-image-processor' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token)
_A : str = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
def A ( self : List[str]):
_A : List[Any] = ViTImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE)
image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token)
_A : Optional[int] = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
SCREAMING_SNAKE_CASE , repo_id='valid_org/test-image-processor-org' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token)
_A : List[str] = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
def A ( self : int):
CustomImageProcessor.register_for_auto_class()
_A : List[Any] = CustomImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE)
image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
_A : Tuple = AutoImageProcessor.from_pretrained(
F'{USER}/test-dynamic-image-processor' , trust_remote_code=SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor')
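# Illustrative sketch of the save/push pattern the staging tests above exercise
# (repo names are examples; requires a valid Hub token):
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   processor.push_to_hub("my-user/my-image-processor")
#   reloaded = ViTImageProcessor.from_pretrained("my-user/my-image-processor")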
| 227 |
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program ,timeout ,task_id ,completion_id ):
    # Run the candidate program in a separate process so that a hang or crash
    # cannot take down the evaluator; parameter names restored so the body's
    # references resolve.
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute ,args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('timed out' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute( check_program ,result ,timeout ):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program ,exec_globals )
            result.append('passed' )
        except TimeoutException:
            result.append('timed out' )
        except BaseException as e:
            result.append(f'failed: {e}' )

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    def signal_handler(signum ,frame ):
        raise TimeoutException('Timed out!' )
    signal.setitimer(signal.ITIMER_REAL ,seconds )
    signal.signal(signal.SIGALRM ,signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL ,0 )
@contextlib.contextmanager
def swallow_io( ):
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir( ):
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException( Exception ):
    """Raised by `time_limit` when the guarded block exceeds its budget."""
    pass
class WriteOnlyStringIO( io.StringIO ):
    """StringIO that raises when read from, so captured output stays write-only."""
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        return False
class redirect_stdin( contextlib._RedirectStream ): # type: ignore
    """Redirect stdin into the write-only stream while guarded code runs."""
    _stream = "stdin"
@contextlib.contextmanager
def chdir( root ):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes=None ):
    # Disable destructive syscalls and resource hogs before running untrusted
    # code; assignment targets reconstructed from the upstream human-eval code.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins

    builtins.exit = None
    builtins.quit = None
    import os

    os.environ['OMP_NUM_THREADS'] = '1'
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess

    subprocess.Popen = None # type: ignore
    __builtins__['help'] = None
    import sys

    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
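# Illustrative usage sketch: evaluate one candidate program together with its
# test under the sandbox defined above. Program string and ids are examples.
if __name__ == "__main__":
    candidate = "def add(a, b):\n    return a + b\n\nassert add(2, 3) == 5\n"
    print(check_correctness(candidate , timeout=3.0 , task_id='demo/0' , completion_id=0 ) )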
| 227 | 1 |
import operator as op
_lowercase: List[str] = "scaler.pt"
_lowercase: int = "pytorch_model"
_lowercase: Optional[Any] = "random_states"
_lowercase: Optional[Any] = "optimizer"
_lowercase: Dict = "scheduler"
_lowercase: Union[str, Any] = "pytorch_model.bin"
_lowercase: List[str] = "pytorch_model.bin.index.json"
_lowercase: Optional[int] = "model.safetensors"
_lowercase: List[str] = "model.safetensors.index.json"
_lowercase: str = "1.10.2"
_lowercase: str = "py38"
_lowercase: Dict = "4.17.0"
_lowercase: Tuple = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
_lowercase: List[str] = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
_lowercase: List[Any] = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
_lowercase: List[Any] = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
_lowercase: Tuple = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
_lowercase: int = "2.0.1"
_lowercase: Optional[Any] = ["pdsh", "standard", "openmpi", "mvapich"]
_lowercase: str = ["default", "reduce-overhead", "max-autotune"]
_lowercase: Optional[int] = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_lowercase: List[str] = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
_lowercase: Tuple = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
_lowercase: Union[str, Any] = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 227 |
ROMAN = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int( roman : str ) -> int:
    """
    Convert a Roman numeral to an integer, e.g. roman_to_int("MCMXCIV") == 1994.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman( number : int ) -> str:
    """
    Convert an integer to a Roman numeral, e.g. int_to_roman(1994) == "MCMXCIV".
    """
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
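    # Illustrative round-trip checks (in addition to the doctests above):
    assert int_to_roman(1994) == "MCMXCIV"
    assert roman_to_int("MCMXCIV") == 1994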
| 227 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig ):
    """Configuration class to store the configuration of a WavLM model."""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self ):
        return functools.reduce(operator.mul, self.conv_stride, 1 )
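# Illustrative usage sketch: instantiating a randomly initialised model from
# this config; the reduced sizes are examples only.
if __name__ == "__main__":
    from transformers import WavLMModel

    config = WavLMConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=512)
    model = WavLMModel(config)
    print(sum(p.numel() for p in model.parameters()), "parameters")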
| 355 |
from __future__ import annotations
def kmp( pattern : str , text : str ) -> bool:
    # 1) Precompute the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0 # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern : str ) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
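    # Test 6) illustrative: thanks to the failure array, matching never rescans
    # text characters, so the search runs in O(len(pattern) + len(text)).
    assert kmp("aabaabaaa", "xxaabaabaaayy")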
| 306 | 0 |
from datetime import datetime
import requests
def download_video( url : str ) -> bytes:
    '''Fetch the raw bytes of an Instagram video/IGTV post via downloadgram.net.'''
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 328 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys( name : str ) -> str:
    '''Map a fairseq parameter name onto the transformers MusicGen naming scheme.'''
    if "emb" in name:
        name = name.replace('emb' , 'model.decoder.embed_tokens' )
    if "transformer" in name:
        name = name.replace('transformer' , 'model.decoder' )
    if "cross_attention" in name:
        name = name.replace('cross_attention' , 'encoder_attn' )
    if "linear1" in name:
        name = name.replace('linear1' , 'fc1' )
    if "linear2" in name:
        name = name.replace('linear2' , 'fc2' )
    if "norm1" in name:
        name = name.replace('norm1' , 'self_attn_layer_norm' )
    if "norm_cross" in name:
        name = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
    if "norm2" in name:
        name = name.replace('norm2' , 'final_layer_norm' )
    if "out_norm" in name:
        name = name.replace('out_norm' , 'model.decoder.layer_norm' )
    if "linears" in name:
        name = name.replace('linears' , 'lm_heads' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
    return name
def rename_state_dict( state_dict : OrderedDict , hidden_size : int ) -> Tuple[Dict, Dict]:
    '''Rename keys and split the fairseq state dict into decoder weights and the enc-dec projection.'''
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('in_proj_weight' , 'q_proj.weight' )] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'k_proj.weight' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'v_proj.weight' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint : str ) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained('t5-base' )
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
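    # Example invocation (illustrative script name and paths):
    #   python convert_musicgen_transformers.py --checkpoint small \
    #       --pytorch_dump_folder ./musicgen-small --device cpu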
| 328 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig(PretrainedConfig ):
    """Configuration class to store the configuration of an MGP-STR model."""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
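# Illustrative usage sketch: building a randomly initialised scene-text
# recognition model from this config (no checkpoint download required).
if __name__ == "__main__":
    from transformers import MgpstrForSceneTextRecognition

    config = MgpstrConfig(max_token_length=27)
    model = MgpstrForSceneTextRecognition(config)
    print(model.config.model_type)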
| 102 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=64 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class Split(Enum ):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset ):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset" , None )
                self.examples = self.old_features.get("examples" , None )
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ):
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
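# Illustrative usage sketch (model name and data path are examples only):
# building a training dataset from local SQuAD json files.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
    train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode=Split.train)
    print(len(train_dataset), "features")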
| 102 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    # helper: expand a scalar size into an (h, w) pair; pass iterables through
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class snake_case__:
'''simple docstring'''
def lowercase_ ( self , __lowercase , __lowercase ) -> str:
pass
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> List[Any]:
pass
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Dict:
lowerCAmelCase_ : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(__lowercase , __lowercase , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> int:
lowerCAmelCase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(__lowercase , __lowercase )
lowerCAmelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(__lowercase )
lowerCAmelCase_ : List[Any] = model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.get_vision_text_model(__lowercase , __lowercase )
lowerCAmelCase_ : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
lowerCAmelCase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowercase )
lowerCAmelCase_ : Tuple = model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.get_vision_text_model(__lowercase , __lowercase )
lowerCAmelCase_ : str = {'''vision_model''': vision_model, '''text_model''': text_model}
lowerCAmelCase_ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowercase )
lowerCAmelCase_ : Union[str, Any] = model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
lowerCAmelCase_ : Optional[Any] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
lowerCAmelCase_ : int = FlaxVisionTextDualEncoderModel.from_pretrained(__lowercase )
lowerCAmelCase_ : Any = model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
lowerCAmelCase_ : List[str] = after_output[0]
lowerCAmelCase_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-3 )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.get_vision_text_model(__lowercase , __lowercase )
lowerCAmelCase_ : List[str] = {'''vision_model''': vision_model, '''text_model''': text_model}
lowerCAmelCase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowercase )
lowerCAmelCase_ : Union[str, Any] = model(
input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase )
lowerCAmelCase_ : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase_ : Union[str, Any] = to_atuple(vision_model.config.image_size )
lowerCAmelCase_ : int = to_atuple(vision_model.config.patch_size )
lowerCAmelCase_ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase_ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase_ : int = output.text_model_output.attentions
self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
pt_model.to(__lowercase )
pt_model.eval()
# prepare inputs
lowerCAmelCase_ : Dict = inputs_dict
lowerCAmelCase_ : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCAmelCase_ : Dict = pt_model(**__lowercase ).to_tuple()
lowerCAmelCase_ : str = fx_model(**__lowercase ).to_tuple()
self.assertEqual(len(__lowercase ) , len(__lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowercase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__lowercase )
lowerCAmelCase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__lowercase , from_pt=__lowercase )
lowerCAmelCase_ : Optional[Any] = fx_model_loaded(**__lowercase ).to_tuple()
self.assertEqual(len(__lowercase ) , len(__lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__lowercase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__lowercase )
lowerCAmelCase_ : Optional[int] = VisionTextDualEncoderModel.from_pretrained(__lowercase , from_flax=__lowercase )
pt_model_loaded.to(__lowercase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCAmelCase_ : List[Any] = pt_model_loaded(**__lowercase ).to_tuple()
self.assertEqual(len(__lowercase ) , len(__lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__lowercase , pt_output_loaded.numpy() , 4e-2 )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = VisionTextDualEncoderModel(__lowercase )
lowerCAmelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(__lowercase )
lowerCAmelCase_ : Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __lowercase )
lowerCAmelCase_ : Dict = fx_state
self.check_pt_flax_equivalence(__lowercase , __lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(__lowercase , __lowercase )
lowerCAmelCase_ : int = VisionTextDualEncoderModel(__lowercase )
lowerCAmelCase_ : List[Any] = FlaxVisionTextDualEncoderModel(__lowercase )
lowerCAmelCase_ : Dict = load_flax_weights_in_pytorch_model(__lowercase , fx_model.params )
self.check_pt_flax_equivalence(__lowercase , __lowercase , __lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__lowercase )
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : int = self.prepare_config_and_inputs()
self.check_save_load(**__lowercase )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__lowercase )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ : Tuple = config_inputs_dict.pop('''vision_config''' )
lowerCAmelCase_ : Optional[Any] = config_inputs_dict.pop('''text_config''' )
lowerCAmelCase_ : List[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(__lowercase , __lowercase , __lowercase )
self.check_equivalence_flax_to_pt(__lowercase , __lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.get_pretrained_model_and_inputs()
lowerCAmelCase_ : Tuple = model_a(**__lowercase )
lowerCAmelCase_ : int = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__lowercase )
lowerCAmelCase_ : str = model_a(**__lowercase )
lowerCAmelCase_ : int = after_outputs[0]
lowerCAmelCase_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-5 )
@require_flax
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowercase , text_from_pt=__lowercase , )
lowerCAmelCase_ : int = 1_3
lowerCAmelCase_ : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCAmelCase_ : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowerCAmelCase_ : List[str] = random_attention_mask([batch_size, 4] )
lowerCAmelCase_ : List[str] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase_ ( self , __lowercase , __lowercase ) -> Dict:
lowerCAmelCase_ : Any = FlaxViTModel(__lowercase )
lowerCAmelCase_ : List[Any] = FlaxBertModel(__lowercase )
return vision_model, text_model
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = FlaxViTModelTester(self )
lowerCAmelCase_ : List[str] = FlaxBertModelTester(self )
lowerCAmelCase_ : Dict = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : int = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = vision_config_and_inputs
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__lowercase , text_from_pt=__lowercase , )
lowerCAmelCase_ : Optional[int] = 1_3
lowerCAmelCase_ : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCAmelCase_ : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowerCAmelCase_ : List[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase_ : List[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase_ ( self , __lowercase , __lowercase ) -> Optional[int]:
lowerCAmelCase_ : Optional[int] = FlaxCLIPVisionModel(__lowercase )
lowerCAmelCase_ : Tuple = FlaxBertModel(__lowercase )
return vision_model, text_model
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : Any = FlaxCLIPVisionModelTester(self )
lowerCAmelCase_ : List[Any] = FlaxBertModelTester(self )
lowerCAmelCase_ : Optional[int] = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[int] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = vision_config_and_inputs
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
lowerCAmelCase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
lowerCAmelCase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase_ : Optional[Any] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowercase , padding=__lowercase , return_tensors='''np''' )
lowerCAmelCase_ : List[str] = model(**__lowercase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __lowercase , atol=1e-3 ) )
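# Illustrative sketch of the dual-encoder construction these tests exercise
# (checkpoint names are examples only):
#
#   model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
#       "openai/clip-vit-base-patch32", "bert-base-uncased")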
| 262 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = [
    "ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab",
    "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn",
    "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt",
    "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn",
    "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn",
    "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn",
    "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn",
    "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn",
    "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn",
    "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo",
    "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn",
    "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn",
    "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva",
    "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn",
    "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng",
    "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn",
    "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml",
    "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn",
    "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab",
    "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans",
    "zho_Hant", "zul_Latn",
]
class NllbTokenizerFast(PreTrainedTokenizerFast ):
    """
    Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , __lowercase ) -> None:
        self._src_lang = __lowercase
        self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : int = self(__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , **__lowercase )
lowerCAmelCase_ : Dict = self.convert_tokens_to_ids(__lowercase )
lowerCAmelCase_ : List[Any] = tgt_lang_id
return inputs
def lowercase_ ( self , __lowercase , __lowercase = "eng_Latn" , __lowercase = None , __lowercase = "fra_Latn" , **__lowercase , ) -> BatchEncoding:
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : List[str] = tgt_lang
        return super().prepare_seq2seq_batch(__lowercase , __lowercase , **__lowercase )
def lowercase_ ( self ) -> List[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : List[str] = self.convert_tokens_to_ids(__lowercase )
if self.legacy_behaviour:
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ : Optional[int] = [self.cur_lang_code]
lowerCAmelCase_ : List[Any] = [self.eos_token_id]
lowerCAmelCase_ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , __lowercase ) -> None:
lowerCAmelCase_ : Dict = self.convert_tokens_to_ids(__lowercase )
if self.legacy_behaviour:
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : Any = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase_ : Any = [self.cur_lang_code]
lowerCAmelCase_ : Any = [self.eos_token_id]
lowerCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCAmelCase_ : Any = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
| 262 | 1 |
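The prefix/suffix bookkeeping in the two `set_*_lang_special_tokens` methods above boils down to one rule: in legacy mode the language code trails the sequence after `</s>`, otherwise it leads. A minimal sketch of that layout in plain Python, with made-up placeholder token ids rather than the real NLLB vocabulary:

# Sketch of NLLB-style language-token placement; all ids below are placeholders.
def build_inputs(token_ids, lang_code_id, eos_id, legacy_behaviour=False):
    if legacy_behaviour:
        prefix, suffix = [], [eos_id, lang_code_id]  # lang code after </s>
    else:
        prefix, suffix = [lang_code_id], [eos_id]    # lang code first
    return prefix + token_ids + suffix

print(build_inputs([10, 11], lang_code_id=900, eos_id=2))                         # [900, 10, 11, 2]
print(build_inputs([10, 11], lang_code_id=900, eos_id=2, legacy_behaviour=True))  # [10, 11, 2, 900]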
"""simple docstring"""
from manim import *
class lowerCamelCase__ ( Scene ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""CPU""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(4 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""GPU""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""Model""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[]
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase =Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
cpu_targs.append(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , aligned_edge=__UpperCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCAmelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCAmelCase =MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) , Write(__UpperCAmelCase ) )
self.play(Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i, rect in enumerate(__UpperCAmelCase ):
_lowerCAmelCase =fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
first_animations.append(GrowFromCenter(__UpperCAmelCase , run_time=1 ) )
_lowerCAmelCase =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(*__UpperCAmelCase )
self.wait()
| 341 |
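The scene above is normally rendered through Manim's CLI, for example `manim -pql this_file.py lowerCamelCase__`; it can also be driven programmatically. A hedged sketch of the latter, where the quality setting is an arbitrary choice:

from manim import tempconfig

with tempconfig({"quality": "low_quality"}):
    lowerCamelCase__().render()  # the Scene subclass defined above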
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 1 |
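The heart of `create_and_check_decoder_model_past` is a generic equivalence check: decoding the last token with `past_key_values` must match slicing the full forward pass. A self-contained sketch of that pattern with a tiny randomly initialized TrOCR decoder (the config values are arbitrary small numbers, not a real checkpoint):

import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRForCausalLM

config = TrOCRConfig(vocab_size=100, d_model=32, decoder_layers=2, decoder_attention_heads=4,
                     decoder_ffn_dim=64, max_position_embeddings=64)
model = TrOCRForCausalLM(config).eval()
ids = torch.randint(1, 100, (2, 7))
with torch.no_grad():
    past = model(ids[:, :-1], use_cache=True).past_key_values
    cached_logits = model(ids[:, -1:], past_key_values=past).logits
    full_logits = model(ids).logits[:, -1:, :]
assert torch.allclose(cached_logits, full_logits, atol=1e-3)  # the cache must not change the result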
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __A ( unittest.TestCase ):
def _lowercase (self : Dict ):
UpperCAmelCase_ = "laion/clap-htsat-unfused"
UpperCAmelCase_ = tempfile.mkdtemp()
def _lowercase (self : Dict , **__a : List[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )
def _lowercase (self : str , **__a : Union[str, Any] ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def _lowercase (self : str ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 )
UpperCAmelCase_ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
UpperCAmelCase_ = floats_list((3, 1000) )
UpperCAmelCase_ = feature_extractor(__a , return_tensors="np" )
UpperCAmelCase_ = processor(audios=__a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
UpperCAmelCase_ = "This is a test string"
UpperCAmelCase_ = processor(text=__a )
UpperCAmelCase_ = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(__a )
UpperCAmelCase_ = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_feature_extractor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ClapProcessor(tokenizer=__a , feature_extractor=__a )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 1 |
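The round trip these tests exercise follows the standard processor serialization pattern; a hedged sketch using the same checkpoint as the test (downloads the vocabulary and feature-extractor config on first use):

import tempfile
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
with tempfile.TemporaryDirectory() as tmp:
    processor.save_pretrained(tmp)
    reloaded = ClapProcessor.from_pretrained(tmp)
assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()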
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 1 | 1 |
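Folding `lcm` over 1..n yields the smallest positive integer divisible by every number up to n, because gcd(x, y) * lcm(x, y) = x * y keeps the accumulator minimal at each step. Two classic values to sanity-check the snippet above:

assert solution(10) == 2520         # smallest number divisible by 1..10
assert solution(20) == 232792560    # the Project Euler problem 5 answer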
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCamelCase_ : List[Any] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
__a = self.transformer_dir
shutil.copy(
os.path.join(lowerCamelCase__ , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> Any:
__a = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
__a = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
__a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
__a = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
__a = os.path.join(self.transformer_dir , 'new_code.py' )
with open(lowerCamelCase__ , 'w' , newline='\n' ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , 'r' ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , lowerCamelCase__ ) , )
# Copy consistency with a really long name
__a = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , f'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , lowerCamelCase__ , overwrite_result=re.sub('Bert' , 'TestModel' , lowerCamelCase__ ) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
__a = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
__a = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
__a = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
__a = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme['format_model_list'] )
self.assertFalse(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
__a = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowerCamelCase__ )
__a = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
__a = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
__a = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
__a = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 357 |
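The `Bert->TestModel` patterns these tests feed in are simple ordered string substitutions applied to the reference code before comparison. A standalone sketch of that replacement step (not the actual `check_copies` implementation):

def apply_replacements(code: str, pattern: str) -> str:
    # pattern looks like "Bert->TestModel,bert->test_model"; pairs apply in order
    for pair in pattern.split(","):
        old, new = pair.split("->")
        code = code.replace(old, new)
    return code

reference = "class BertLMPredictionHead(nn.Module):"
assert apply_replacements(reference, "Bert->TestModel") == "class TestModelLMPredictionHead(nn.Module):"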
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class a__ ( BaseOutput ):
A__ : torch.FloatTensor
A__ : torch.FloatTensor
A__ : Optional[torch.FloatTensor] = None
class a__ ( SchedulerMixin , ConfigMixin ):
A__ : Optional[Any] = 2
@register_to_config
def __init__( self , UpperCAmelCase = 0.02 , UpperCAmelCase = 1_0_0 , UpperCAmelCase = 1.007 , UpperCAmelCase = 8_0 , UpperCAmelCase = 0.05 , UpperCAmelCase = 5_0 , ) -> Optional[Any]:
# standard deviation of the initial noise distribution
__a = sigma_max
# setable values
__a = None
__a = None
__a = None # sigma(t_i)
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> torch.FloatTensor:
return sample
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> int:
__a = num_inference_steps
__a = np.arange(0 , self.num_inference_steps )[::-1].copy()
__a = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
__a = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__a = torch.tensor(UpperCAmelCase , dtype=torch.floataa , device=UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
__a = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
__a = 0
# sample eps ~ N(0, S_noise^2 * I)
__a = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCAmelCase ).to(sample.device )
__a = sigma + gamma * sigma
__a = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[KarrasVeOutput, Tuple]:
__a = sample_hat + sigma_hat * model_output
__a = (sample_hat - pred_original_sample) / sigma_hat
__a = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCAmelCase , derivative=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[KarrasVeOutput, Tuple]:
__a = sample_prev + sigma_prev * model_output
__a = (sample_prev - pred_original_sample) / sigma_prev
__a = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCAmelCase , derivative=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
raise NotImplementedError()
| 197 | 0 |
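The list comprehension in the `set_timesteps` method above geometrically interpolates between the squared sigmas; because the timesteps run from high to low, the resulting list starts at sigma_min**2 and ends at sigma_max**2. A standalone numpy sketch with the scheduler's default values:

import numpy as np

sigma_min, sigma_max, n = 0.02, 100.0, 50   # defaults from the __init__ signature above
t = np.arange(n)[::-1]                      # n-1 ... 0, as in set_timesteps
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (t / (n - 1))
print(schedule[0], schedule[-1])            # 0.0004 ... 10000.0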
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowerCAmelCase__ = datasets.logging.get_logger(__name__)
lowerCAmelCase__ = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
lowerCAmelCase__ = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
lowerCAmelCase__ = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
lowerCAmelCase__ = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase__ ( datasets.Metric):
'''simple docstring'''
def _lowerCamelCase ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare ( self , dl_manager) -> Union[str, Any]:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
_A : Tuple = "bleurt-base-128"
if self.config_name.lower() in CHECKPOINT_URLS:
_A : Tuple = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
_A : str = self.config_name.upper()
else:
raise KeyError(
F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
# download the model checkpoint specified by self.config_name and set up the scorer
_A : str = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
_A : Dict = score.BleurtScorer(os.path.join(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = self.scorer.score(references=__lowerCamelCase , candidates=__lowerCamelCase)
return {"scores": scores}
| 11 |
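Beyond the default checkpoint, any key of CHECKPOINT_URLS can be passed as the metric's config name; a hedged usage sketch (requires the `bleurt` package from the Git URL in the import above, plus network access for the checkpoint download):

import datasets

bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")  # the config name selects the checkpoint
results = bleurt.compute(predictions=["hello there"], references=["hello there"])
print(results["scores"])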
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( ProcessorMixin ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
| 11 | 1 |
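The nested-text branch of `__call__` pads every sample's query list to the batch-wide maximum before tokenizing, so every image in a batch sees the same number of text queries. The padding logic in isolation:

text = [["a photo of a cat"], ["a photo of a dog", "a photo of a bird"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)  # [['a photo of a cat', ' '], ['a photo of a dog', 'a photo of a bird']]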
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCAmelCase : Any ={
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _A ( PipelineTool ):
snake_case__ : str = 'facebook/nllb-200-distilled-600M'
snake_case__ : str = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ : Optional[int] = 'translator'
snake_case__ : int = AutoTokenizer
snake_case__ : str = AutoModelForSeqaSeqLM
snake_case__ : List[Any] = LANGUAGE_CODES
snake_case__ : Optional[Any] = ['text', 'text', 'text']
snake_case__ : List[str] = ['text']
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'{tgt_lang} is not a supported language.' )
lowercase = self.lang_to_code[src_lang]
lowercase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__lowerCAmelCase , return_tensors="""pt""" , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.model.generate(**__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCAmelCase )
| 32 |
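A hedged sketch of invoking the tool through the agents API; `load_tool("translation")` is assumed to resolve to this class, and the first call downloads the NLLB-200 checkpoint:

from transformers import load_tool

translator = load_tool("translation")
print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))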
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( PretrainedConfig ):
snake_case__ : Dict = 'mask2former'
snake_case__ : Union[str, Any] = ['swin']
snake_case__ : Any = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
lowercase = backbone_config
lowercase = feature_size
lowercase = mask_feature_size
lowercase = hidden_dim
lowercase = encoder_feedforward_dim
lowercase = activation_function
lowercase = encoder_layers
lowercase = decoder_layers
lowercase = num_attention_heads
lowercase = dropout
lowercase = dim_feedforward
lowercase = pre_norm
lowercase = enforce_input_projection
lowercase = common_stride
lowercase = ignore_value
lowercase = num_queries
lowercase = no_object_weight
lowercase = class_weight
lowercase = mask_weight
lowercase = dice_weight
lowercase = train_num_points
lowercase = oversample_ratio
lowercase = importance_sample_ratio
lowercase = init_std
lowercase = init_xavier_std
lowercase = use_auxiliary_loss
lowercase = feature_strides
lowercase = output_auxiliary_logits
lowercase = decoder_layers
super().__init__(**__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(
backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
| 32 | 1 |
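A hedged sketch of building this config with an explicit Swin backbone, mirroring the default set up in `__init__` (the values shown match those defaults, so this mainly illustrates the wiring):

from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(image_size=224, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24])
config = Mask2FormerConfig(backbone_config=backbone, num_queries=100)
print(config.hidden_dim, config.backbone_config.model_type)  # 256 swin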
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
| 52 |
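A worked example of the shift arithmetic in the cipher above, with key "KEY": H(7)+K(10)=17 -> R, E(4)+E(4)=8 -> I, L(11)+Y(24)=35 mod 26=9 -> J, L(11)+K(10)=21 -> V, O(14)+E(4)=18 -> S. As a check against the functions above:

assert encrypt_message("KEY", "HELLO") == "RIJVS"
assert decrypt_message("KEY", "RIJVS") == "HELLO"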
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase :List[str] = KandinskyInpaintPipeline
_UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCAmelCase :Dict = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCAmelCase :Optional[int] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase :int = False
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 100
@property
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCamelCase : Optional[int] = MultilingualCLIP(A_ )
UpperCamelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.dummy_text_encoder
UpperCamelCase : str = self.dummy_tokenizer
UpperCamelCase : List[Any] = self.dummy_unet
UpperCamelCase : Optional[Any] = self.dummy_movq
UpperCamelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
UpperCamelCase : Optional[Any] = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase( self , A_ , A_=0 ):
'''simple docstring'''
UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) )
# create mask
UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa )
UpperCamelCase : str = 0
if str(A_ ).startswith("mps" ):
UpperCamelCase : int = torch.manual_seed(A_ )
else:
UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Union[str, Any] = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = "cpu"
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : str = self.pipeline_class(**A_ )
UpperCamelCase : Tuple = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase : List[Any] = output.images
UpperCamelCase : List[Any] = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : Union[str, Any] = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __UpperCamelCase( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCamelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
UpperCamelCase : str = 0
UpperCamelCase : List[Any] = "a hat"
UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCamelCase : Optional[Any] = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : Dict = pipeline(
A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
UpperCamelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
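# A hedged end-to-end sketch mirroring the slow test above (model ids are the
# ones used in the test; `init_image` and `mask` stand for the cat image and
# the np.ones mask built earlier, and generation arguments are illustrative):
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   image_emb, zero_image_emb = prior("a hat", num_inference_steps=5, negative_prompt="").to_tuple()
#   pipe = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")
#   out = pipe("a hat", image=init_image, mask_image=mask,
#              image_embeds=image_emb, negative_image_embeds=zero_image_emb,
#              height=768, width=768, output_type="np")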
| 52 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
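# The T5FiLMLayer above is feature-wise linear modulation (FiLM): one linear
# projection of the conditioning embedding is split into a per-channel scale
# and shift, applied as x * (1 + scale) + shift. Shape sketch (illustrative):
# x is (batch, seq, d_model) and conditioning_emb is (batch, 1, 4 * d_model);
# scale_bias projects to 2 * d_model, and torch.chunk(..., 2, -1) yields two
# (batch, 1, d_model) tensors that broadcast over the sequence axis.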
| 371 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
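# A minimal usage sketch (EfficientNetModel is the companion model class in
# Transformers; override values are illustrative):
#
#   from transformers import EfficientNetConfig, EfficientNetModel
#   config = EfficientNetConfig()          # google/efficientnet-b7 style defaults
#   model = EfficientNetModel(config)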
| 281 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase ( self ):
A__ = NllbTokenizer(__lowerCamelCase,keep_accents=__lowerCamelCase )
A__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCamelCase,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ),[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],)
A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCamelCase,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
],)
A__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
],)
A__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
],)
def UpperCamelCase ( self ):
A__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = self.tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCamelCase )
A__ = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
A__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__lowerCamelCase,__lowerCamelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCamelCase )
A__ = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase,__lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCamelCase,legacy_format=__lowerCamelCase )
A__ = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase,__lowerCamelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCamelCase )
A__ = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase,__lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCamelCase,legacy_format=__lowerCamelCase )
A__ = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCamelCase )
A__ = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase,__lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@require_torch
def UpperCamelCase ( self ):
if not self.test_seqaseq:
return
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
A__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
A__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase,tgt_texts=__lowerCamelCase,max_length=3,max_target_length=10,return_tensors='''pt''',src_lang='''eng_Latn''',tgt_lang='''ron_Latn''',)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1],3 )
self.assertEqual(batch.labels.shape[1],10 )
# max_target_length will default to max_length if not specified
A__ = tokenizer.prepare_seqaseq_batch(
__lowerCamelCase,tgt_texts=__lowerCamelCase,max_length=3,return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1],3 )
self.assertEqual(batch.labels.shape[1],3 )
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase,max_length=3,max_target_length=10,return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1],3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1],3 )
self.assertNotIn('''decoder_input_ids''',__lowerCamelCase )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = [AddedToken('''<special>''',lstrip=__lowerCamelCase )]
A__ = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase,additional_special_tokens=__lowerCamelCase,**__lowerCamelCase )
A__ = tokenizer_r.encode('''Hey this is a <special> token''' )
A__ = tokenizer_r.encode('''<special>''',add_special_tokens=__lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A__ = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase,additional_special_tokens=__lowerCamelCase,**__lowerCamelCase,)
A__ = self.tokenizer_class.from_pretrained(
__lowerCamelCase,additional_special_tokens=__lowerCamelCase,**__lowerCamelCase )
A__ = tokenizer_p.encode('''Hey this is a <special> token''' )
A__ = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(__lowerCamelCase,__lowerCamelCase )
self.assertEqual(__lowerCamelCase,__lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256_047,
        16_297,
        134_408,
        8_165,
        248_066,
        14_734,
        950,
        1_135,
        105_721,
        3_573,
        83,
        27_352,
        108,
        49_486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''],25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''],25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''],25_6057 )
def UpperCamelCase ( self ):
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens,__lowerCamelCase )
def UpperCamelCase ( self ):
self.assertIn(__lowerCamelCase,self.tokenizer.all_special_ids )
# fmt: off
A__ = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
A__ = self.tokenizer.decode(__lowerCamelCase,skip_special_tokens=__lowerCamelCase )
A__ = self.tokenizer.decode(generated_ids[1:],skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase,__lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0],__lowerCamelCase )
A__ = 10
A__ = self.tokenizer(__lowerCamelCase,max_length=__lowerCamelCase,truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1],2 )
self.assertEqual(ids[0],__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ),__lowerCamelCase )
def UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ),[25_6203, 3] )
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
A__ = NllbTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids,__lowerCamelCase )
@require_torch
def UpperCamelCase ( self ):
A__ = self.tokenizer(
self.src_text,text_target=self.tgt_text,padding=__lowerCamelCase,truncation=__lowerCamelCase,max_length=len(self.expected_src_tokens ),return_tensors='''pt''',)
A__ = shift_tokens_right(
batch['''labels'''],self.tokenizer.pad_token_id,self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
self.assertEqual((2, 15),batch.input_ids.shape )
self.assertEqual((2, 15),batch.attention_mask.shape )
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens,__lowerCamelCase )
self.assertEqual(__lowerCamelCase,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] )
def UpperCamelCase ( self ):
A__ = self.tokenizer(self.src_text,padding=__lowerCamelCase,truncation=__lowerCamelCase,max_length=3,return_tensors='''pt''' )
A__ = self.tokenizer(
text_target=self.tgt_text,padding=__lowerCamelCase,truncation=__lowerCamelCase,max_length=10,return_tensors='''pt''' )
A__ = targets['''input_ids''']
A__ = shift_tokens_right(
__lowerCamelCase,self.tokenizer.pad_token_id,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],)
self.assertEqual(batch.input_ids.shape[1],3 )
self.assertEqual(batch.decoder_input_ids.shape[1],10 )
@require_torch
def UpperCamelCase ( self ):
A__ = self.tokenizer._build_translation_inputs(
'''A test''',return_tensors='''pt''',src_lang='''eng_Latn''',tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(__lowerCamelCase ),{
# A, test, EOS, en_XX
'''input_ids''': [[25_6047, 70, 7356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_6057,
},)
@require_torch
def UpperCamelCase ( self ):
A__ = True
A__ = self.tokenizer(
'''UN Chief says there is no military solution in Syria''',src_lang='''eng_Latn''',tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids,[1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
A__ = False
A__ = self.tokenizer(
'''UN Chief says there is no military solution in Syria''',src_lang='''eng_Latn''',tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids,[25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
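# A hedged sketch of typical NLLB usage, consistent with the tests above
# (checkpoint and language codes taken from the test class):
#
#   tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M",
#                                       src_lang="eng_Latn", tgt_lang="ron_Latn")
#   batch = tok(src_texts, text_target=tgt_texts, padding=True, return_tensors="pt")
#   # As the last test shows, `legacy_behaviour` controls whether the source
#   # language code is appended after EOS or prepended to the sequence.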
| 193 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the size of each initial set; every set starts with rank 1."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; returns False if they are already in the same set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
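# A minimal usage sketch (set sizes are illustrative):
#
#   ds = DisjointSet([1, 1, 1])   # three singleton sets
#   ds.merge(1, 2)                # union by rank; returns True
#   ds.merge(0, 2)
#   assert ds.get_parent(0) == ds.get_parent(1)
#   assert ds.max_set == 3        # the merged set now holds all three items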
| 193 | 1 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
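# A minimal usage sketch of the doubly linked list above (values illustrative):
#
#   ll = LinkedList()
#   for v in (1, 2, 3):
#       ll.insert(v)
#   ll.insert_at_position(position=2, value=9)
#   assert str(ll) == "1 9 2 3"
#   ll.delete_value(9)
#   assert list(ll) == [1, 2, 3]          # __iter__ -> LinkedListIterator
#   assert 2 in ll and ll.get_tail_data() == 3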
| 120 |
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place with odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between comparing even- and odd-indexed neighbour pairs.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
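# Note on the algorithm above: after n alternating passes every element is in
# place, so the worst case is O(n^2) comparisons, like bubble sort. Its appeal
# is that the pairs compared within one pass are disjoint, so each pass can be
# executed in parallel (the classic "brick sort" on parallel hardware).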
| 120 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts (or text pairs) into tensors of shape (batch, num_candidates, ...)."""
        # set batch padding to max length in the batch
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
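# A hedged usage sketch of `batch_encode_candidates` above (the checkpoint is
# one of the entries in the vocab map; candidate strings are illustrative):
#
#   tok = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tok.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]],  # one example with two candidates
#       max_length=10,
#       return_tensors="pt",
#   )
#   # batch.input_ids has shape (num_examples, num_candidates, max_length)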
| 248 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__snake_case : Dict = """<<<<<<< This should probably be modified because it mentions: """
__snake_case : Any = """=======
>>>>>>>
"""
__snake_case : Any = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
__snake_case : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Factory used by the argument parser to instantiate a `ConvertCommand` from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    """CLI command that converts a TensorFlow Datasets script into a HuggingFace Datasets script."""
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
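# Example invocation of the command registered above (paths are illustrative;
# the flags match the `add_argument` calls in `register_subcommand`):
#
#   datasets-cli convert --tfds_path ./my_tfds_dataset.py --datasets_directory ./hf_datasets/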
| 248 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after DetaImageProcessor resizing (and batch padding)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
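    # Note: the branches above mirror `shortest_edge` resizing: the shorter image
    # side is scaled to size["shortest_edge"] and the longer side by the same
    # ratio; for batched inputs the expected padded size is the per-axis maximum
    # over the batch, matching DETR-style `do_pad` behaviour.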
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : str = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target}
# encode them
UpperCAmelCase__ : Optional[int] = DetaImageProcessor()
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : int = json.loads(f.read() )
UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
UpperCAmelCase__ : Dict = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
UpperCAmelCase__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 299 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
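# Placeholder classes: each raises an informative error at instantiation when the torch/transformers/onnx backends are missing.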
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_A : Any , **_A : int ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 299 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = MBartConfig
lowercase = {}
lowercase = "gelu"
def __init__( self : Dict , snake_case_ : str , snake_case_ : Tuple=13 , snake_case_ : Optional[Any]=7 , snake_case_ : List[Any]=True , snake_case_ : List[str]=False , snake_case_ : Any=99 , snake_case_ : str=32 , snake_case_ : int=2 , snake_case_ : Optional[Any]=4 , snake_case_ : Tuple=37 , snake_case_ : Tuple=0.1 , snake_case_ : Any=0.1 , snake_case_ : Dict=20 , snake_case_ : Optional[int]=2 , snake_case_ : List[Any]=1 , snake_case_ : Dict=0 , ):
snake_case__ : int = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Optional[Any] = seq_length
snake_case__ : Union[str, Any] = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : str = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : List[Any] = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : Tuple = bos_token_id
def lowerCamelCase ( self : Dict ):
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
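        # append an explicit EOS token to every random sequence so the model sees well-formed inputs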
snake_case__ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ : List[Any] = prepare_mbart_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
return config, inputs_dict
def lowerCamelCase ( self : int , snake_case_ : Dict , snake_case_ : str ):
snake_case__ : Union[str, Any] = TFMBartModel(config=snake_case_ ).get_decoder()
snake_case__ : Any = inputs_dict["""input_ids"""]
snake_case__ : Union[str, Any] = input_ids[:1, :]
snake_case__ : Any = inputs_dict["""attention_mask"""][:1, :]
snake_case__ : Optional[Any] = inputs_dict["""head_mask"""]
snake_case__ : Optional[int] = 1
# first forward pass
snake_case__ : Dict = model(snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , use_cache=snake_case_ )
snake_case__ , snake_case__ : List[Any] = outputs.to_tuple()
snake_case__ : Tuple = past_key_values[1]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> str:
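    # fill in default masks: attend to all non-pad tokens and keep every attention head enabled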
if attention_mask is None:
snake_case__ : Union[str, Any] = tf.cast(tf.math.not_equal(_lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
def lowerCamelCase ( self : str , snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[str] ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCamelCase ( self : str ):
snake_case__ : int = TFMBartModelTester(self )
snake_case__ : Optional[int] = ConfigTester(self , config_class=snake_case_ )
def lowerCamelCase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : List[str] ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
lowercase = [
" UN Chief Says There Is No Military Solution in Syria",
]
lowercase = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
lowercase = "facebook/mbart-large-en-ro"
@cached_property
def lowerCamelCase ( self : Optional[Any] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase ( self : Union[str, Any] , **snake_case_ : int ):
snake_case__ : Tuple = self.translate_src_text(**snake_case_ )
self.assertListEqual(self.expected_text , snake_case_ )
def lowerCamelCase ( self : List[str] , **snake_case_ : Any ):
snake_case__ : Tuple = self.tokenizer(self.src_text , **snake_case_ , return_tensors="""tf""" )
snake_case__ : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
snake_case__ : str = self.tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
return generated_words
@slow
def lowerCamelCase ( self : int ):
self._assert_generated_batch_equal_expected()
| 35 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__a = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
snake_case__ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
snake_case__ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
snake_case__ : int = value
elif weight_type == "weight_g":
snake_case__ : List[str] = value
elif weight_type == "weight_v":
snake_case__ : List[str] = value
elif weight_type == "bias":
snake_case__ : Optional[Any] = value
else:
snake_case__ : str = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
snake_case__ : Union[str, Any] = []
snake_case__ : Dict = fairseq_model.state_dict()
snake_case__ : List[Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case__ : Optional[int] = None
for name, value in fairseq_dict.items():
snake_case__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case__ : Union[str, Any] = True
elif name.split(""".""" )[0] == "proj":
snake_case__ : Tuple = fairseq_model.proj
snake_case__ : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case__ : Optional[Any] = True
if "*" in mapped_key:
snake_case__ : Optional[int] = name.split(_lowerCAmelCase )[0].split(""".""" )[-2]
snake_case__ : Tuple = mapped_key.replace("""*""" , _lowerCAmelCase )
if "weight_g" in name:
snake_case__ : str = """weight_g"""
elif "weight_v" in name:
snake_case__ : int = """weight_v"""
elif "bias" in name:
snake_case__ : Dict = """bias"""
elif "weight" in name:
snake_case__ : Union[str, Any] = """weight"""
else:
snake_case__ : Union[str, Any] = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
snake_case__ : int = full_name.split("""conv_layers.""" )[-1]
snake_case__ : Dict = name.split(""".""" )
snake_case__ : Any = int(items[0] )
snake_case__ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
snake_case__ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
snake_case__ : str = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
snake_case__ : Union[str, Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
snake_case__ : int = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
snake_case__ , snake_case__ : str = emb.weight.shape
snake_case__ : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
snake_case__ : List[str] = emb.weight.data
return lin_layer
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
snake_case__ : int = f.readlines()
snake_case__ : List[Any] = [line.split(""" """ )[0] for line in lines]
snake_case__ : Union[str, Any] = len(_lowerCAmelCase )
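    # fairseq dict files list one token per line; ids 0-3 are reserved for the special tokens below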
snake_case__ : Any = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int:
snake_case__ : Optional[Any] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
snake_case__ : Optional[Any] = SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
snake_case__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
# set weights for wav2vec2 encoder
snake_case__ : Optional[Any] = WavaVecaModel(_lowerCAmelCase )
snake_case__ : Dict = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
snake_case__ : Optional[Any] = SpeechaTextaForCausalLM(_lowerCAmelCase )
snake_case__ , snake_case__ : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
snake_case__ : Tuple = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
snake_case__ : List[Any] = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
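    # the flag below presumably disables weight tying between decoder input and output embeddings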
snake_case__ : Tuple = False
# add projection layer
snake_case__ : Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case__ : int = nn.Parameter(projection_layer.bias )
snake_case__ : Tuple = create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , """vocab.json""" ) , """w""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
snake_case__ : Tuple = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , """vocab.json""" ) )
tokenizer.save_pretrained(_lowerCAmelCase )
snake_case__ : Optional[Any] = hf_wavavec.config.to_dict()
snake_case__ : Tuple = tokenizer.pad_token_id
snake_case__ : Optional[Any] = tokenizer.bos_token_id
snake_case__ : int = tokenizer.eos_token_id
snake_case__ : str = """speech_to_text_2"""
snake_case__ : List[Any] = """wav2vec2"""
snake_case__ : List[str] = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 35 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_A = logging.get_logger(__name__)
class A ( enum.Enum ):
__snake_case = 0
__snake_case = 1
@add_end_docstrings(__UpperCAmelCase )
class A ( __UpperCAmelCase ):
__snake_case = 'generated'
def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
super().__init__(*UpperCamelCase__, **UpperCamelCase__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = {}
if truncation is not None:
lowerCAmelCase_ = truncation
lowerCAmelCase_ = generate_kwargs
lowerCAmelCase_ = {}
if return_tensors is not None and return_type is None:
lowerCAmelCase_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowerCAmelCase_ = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase_ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase_ = self.tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowerCAmelCase_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
return True
def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0], UpperCamelCase__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowerCAmelCase_ = ([prefix + arg for arg in args[0]],)
lowerCAmelCase_ = True
elif isinstance(args[0], UpperCamelCase__ ):
lowerCAmelCase_ = (prefix + args[0],)
lowerCAmelCase_ = False
else:
raise ValueError(
f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`" )
lowerCAmelCase_ = self.tokenizer(*UpperCamelCase__, padding=UpperCamelCase__, truncation=UpperCamelCase__, return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = super().__call__(*UpperCamelCase__, **UpperCamelCase__ )
if (
isinstance(args[0], UpperCamelCase__ )
and all(isinstance(UpperCamelCase__, UpperCamelCase__ ) for el in args[0] )
and all(len(UpperCamelCase__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=TruncationStrategy.DO_NOT_TRUNCATE, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self._parse_and_tokenize(UpperCamelCase__, truncation=UpperCamelCase__, **UpperCamelCase__ )
return inputs
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
if self.framework == "pt":
lowerCAmelCase_ , lowerCAmelCase_ = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowerCAmelCase_ , lowerCAmelCase_ = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowerCAmelCase_ = generate_kwargs.get('''min_length''', self.model.config.min_length )
lowerCAmelCase_ = generate_kwargs.get('''max_length''', self.model.config.max_length )
self.check_inputs(UpperCamelCase__, generate_kwargs['''min_length'''], generate_kwargs['''max_length'''] )
lowerCAmelCase_ = self.model.generate(**UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = output_ids.shape[0]
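        # regroup the flat generations into (batch_size, num_return_sequences, sequence_length)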
if self.framework == "pt":
lowerCAmelCase_ = output_ids.reshape(UpperCamelCase__, out_b // in_b, *output_ids.shape[1:] )
elif self.framework == "tf":
lowerCAmelCase_ = tf.reshape(UpperCamelCase__, (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=ReturnType.TEXT, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowerCAmelCase_ = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
lowerCAmelCase_ = {
f"{self.return_name}_text": self.tokenizer.decode(
UpperCamelCase__, skip_special_tokens=UpperCamelCase__, clean_up_tokenization_spaces=UpperCamelCase__, )
}
records.append(UpperCamelCase__ )
return records
@add_end_docstrings(__UpperCAmelCase )
class A ( __UpperCAmelCase ):
__snake_case = 'summary'
def __call__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return super().__call__(*UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
if max_length < min_length:
logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}." )
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" )
@add_end_docstrings(__UpperCAmelCase )
class A ( __UpperCAmelCase ):
__snake_case = 'translation'
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, UpperCamelCase__=TruncationStrategy.DO_NOT_TRUNCATE, UpperCamelCase__=None, UpperCamelCase__=None ):
"""simple docstring"""
if getattr(self.tokenizer, '''_build_translation_inputs''', UpperCamelCase__ ):
return self.tokenizer._build_translation_inputs(
*UpperCamelCase__, return_tensors=self.framework, truncation=UpperCamelCase__, src_lang=UpperCamelCase__, tgt_lang=UpperCamelCase__ )
else:
return super()._parse_and_tokenize(*UpperCamelCase__, truncation=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=None, UpperCamelCase__=None, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = super()._sanitize_parameters(**UpperCamelCase__ )
if src_lang is not None:
lowerCAmelCase_ = src_lang
if tgt_lang is not None:
lowerCAmelCase_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowerCAmelCase_ = kwargs.get('''task''', self.task )
lowerCAmelCase_ = task.split('''_''' )
if task and len(UpperCamelCase__ ) == 4:
# translation, XX, to YY
lowerCAmelCase_ = items[1]
lowerCAmelCase_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return super().__call__(*UpperCamelCase__, **UpperCamelCase__ )
| 167 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_A = logging.get_logger(__name__)
_A = TypeVar('''DatasetType''', Dataset, IterableDataset)
def __UpperCamelCase ( _A , _A = None , _A = None , _A = None , _A = None , _A = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(_A )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}." )
if i == 0:
lowerCAmelCase_ , lowerCAmelCase_ = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
else:
return _interleave_iterable_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
def __UpperCamelCase ( _A , _A = None , _A = None , _A = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(_A )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}." )
if i == 0:
lowerCAmelCase_ , lowerCAmelCase_ = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_A , info=_A , split=_A , axis=_A )
else:
return _concatenate_iterable_datasets(_A , info=_A , split=_A , axis=_A )
| 167 | 1 |
import math
def _A ( _lowercase ) -> int:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCamelCase = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
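        # within block n, consecutive Proth numbers (k * 2**n + 1) differ by 2**(n + 1)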
for block in range(1 , _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
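        # attention-masked mean pooling: average the token embeddings while ignoring padding positions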
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
| 310 | 1 |
'''simple docstring'''
from math import isqrt, loga
def a ( __a ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ :Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __a , __a ):
UpperCamelCase__ :List[Any] = False
return [i for i in range(2 , __a ) if is_prime[i]]
def a ( __a = 800800 , __a = 800800 ) -> int:
'''simple docstring'''
UpperCamelCase__ :int = degree * loga(__a )
UpperCamelCase__ :Union[str, Any] = int(__a )
UpperCamelCase__ :List[str] = calculate_prime_numbers(__a )
UpperCamelCase__ :int = 0
UpperCamelCase__ :List[Any] = 0
UpperCamelCase__ :List[str] = len(__a ) - 1
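    # two-pointer scan over the primes: count pairs (p, q), p < q, with p**q * q**p <= base**degree, compared in log2 space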
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 219 |
'''simple docstring'''
from __future__ import annotations
__snake_case = list[list[int]]
# assigning initial values to the grid
__snake_case = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__snake_case = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a ( __a , __a , __a , __a ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a ( __a ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a ( __a ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(__a ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
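    # backtracking: try each digit in the first empty cell and undo the choice when the branch dead-ends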
for digit in range(1 , 10 ):
if is_safe(__a , __a , __a , __a ):
UpperCamelCase__ :Tuple = digit
if sudoku(__a ) is not None:
return grid
UpperCamelCase__ :Union[str, Any] = 0
return None
def a ( __a ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(__a , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__snake_case = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 219 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _lowercase :
a = BlenderbotConfig
a = {}
a = """gelu"""
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Any=13 , UpperCamelCase__: List[Any]=7 , UpperCamelCase__: str=True , UpperCamelCase__: Dict=False , UpperCamelCase__: Optional[Any]=99 , UpperCamelCase__: Dict=32 , UpperCamelCase__: int=2 , UpperCamelCase__: str=4 , UpperCamelCase__: Any=37 , UpperCamelCase__: str=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Dict=20 , UpperCamelCase__: Union[str, Any]=2 , UpperCamelCase__: Optional[int]=1 , UpperCamelCase__: Optional[int]=0 , ):
lowerCamelCase__ : str = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : Dict = seq_length
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Tuple = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : int = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : List[Any] = pad_token_id
lowerCamelCase__ : int = bos_token_id
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase__ : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase__ : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase__ : int = prepare_blenderbot_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
lowerCamelCase__ : Optional[int] = TFBlenderbotModel(config=UpperCamelCase__ ).get_decoder()
lowerCamelCase__ : Optional[int] = inputs_dict["""input_ids"""]
lowerCamelCase__ : Any = input_ids[:1, :]
lowerCamelCase__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
lowerCamelCase__ : str = inputs_dict["""head_mask"""]
lowerCamelCase__ : List[str] = 1
# first forward pass
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase__ : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase__ : str = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase__ : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ) -> Any:
if attention_mask is None:
lowerCamelCase__ : Dict = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase__ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowercase ( lowercase_ , lowercase_ , unittest.TestCase ):
a = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
a = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
a = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
a = True
a = False
a = False
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = TFBlenderbotModelTester(self )
lowerCamelCase__ : Tuple = ConfigTester(self , config_class=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_tokenizers
@require_tf
class _lowercase ( unittest.TestCase ):
a = ["""My friends are cool but they eat too many carbs."""]
a = """facebook/blenderbot-400M-distill"""
@cached_property
def lowerCamelCase_ ( self: List[str] ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Union[str, Any] = self.tokenizer(self.src_text , return_tensors="""tf""" )
lowerCamelCase__ : Tuple = self.model.generate(
model_inputs.input_ids , )
lowerCamelCase__ : Any = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 41 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A , A )
def _SCREAMING_SNAKE_CASE (A ) -> List[str]:
"""simple docstring"""
lowercase__ ,lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(A , A , bias=A )
lowercase__ = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = torch.load(A , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A )
lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A )
if mbart_aa and finetuned:
lowercase__ = '''relu'''
lowercase__ = state_dict['''decoder.embed_tokens.weight''']
lowercase__ = MBartForConditionalGeneration(A )
model.model.load_state_dict(A )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is a mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 2 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__UpperCAmelCase = input('''Enter image url: ''').strip()
print(f"""Downloading image from {url} ...""")
__UpperCAmelCase = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
__UpperCAmelCase = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
__UpperCAmelCase = requests.get(image_url).content
__UpperCAmelCase = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 353 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in the open interval (0, 1)")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print("Probability of 2 successes out of 4 trials")
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
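    # Illustrative sanity check (added; not in the original script): the PMF over
    # all possible success counts should sum to one.
    assert abs(sum(binomial_distribution(k, 4, 0.75) for k in range(5)) - 1.0) < 1e-9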
| 42 | 0 |
'''simple docstring'''
def stooge_sort(arr: list) -> list:
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
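
# Illustrative check (added; not part of the original file): stooge sort runs in
# O(n^(log 3 / log 1.5)), roughly O(n^2.71), so it only suits very small inputs.
assert stooge_sort([18, 1, 0, -7, -1, 2, 2]) == [-7, -1, 0, 1, 2, 2, 18]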
if __name__ == "__main__":
a__ : Any = input("Enter numbers separated by a comma:\n").strip()
a__ : Union[str, Any] = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 161 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, i.e. n * (2n - 1) for n = 0..length-1."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
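    # Illustrative check (added): the first five hexagonal numbers.
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]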
| 161 | 1 |
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple[str, float]:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
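    # Usage sketch (added): by the mass action law n * p = n_i**2, so any one of
    # the three concentrations can be recovered from the other two.
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
    # -> ('intrinsic_conc', 50.0)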
| 356 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a list sorted in ascending order, return the indices of two numbers that add up to `target`."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
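    # Illustrative note (added): the scan relies on `nums` being sorted in
    # ascending order; on unsorted input valid pairs can be missed.
    print(f"{two_pointer([2, 7, 11, 15], 26) = }")  # -> [2, 3]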
| 193 | 0 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identifies the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)      -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            'files, respectively' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1})
        logger.info(
            name.ljust(10), f'Recall: {recall * 100:.2f}', f' Precision: {precision * 100:.2f}', f' F1: {f1 * 100:.2f}', )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'CoNLL score: {conll:.2f}')
        output_scores.update({'conll_score': conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Sequence(datasets.Value('string')),
                } ), codebase_urls=['https://github.com/ns-moosavi/coval'], reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ], )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 42 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs, [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ], )
        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs, [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ], )
        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
        num_return_sequences = 3
        outputs = generator(
            "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)
        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs, [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ], )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
        self.assertEqual(
            outputs, [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ], )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 57 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ) -> None:
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
        _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None) -> None:
    """Register the error to raise when an unavailable formatter (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a formatter object from its name or one of its aliases."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got '{format_type}'""" )
| 340 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given number of significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term contributes roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
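    # Illustrative check (added): with 10 significant digits of working precision
    # the first Chudnovsky term alone already suffices, since ceil(10 / 14) = 1.
    assert pi(10) == "3.14159265"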
| 340 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        expected_query = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
        expected_key = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
| 60 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"
    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward
        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        """Return a random level in [1, self.max_level]; higher levels are less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
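
    # Note (added for illustration): with promotion probability p, a node is
    # promoted to level L with probability p**(L - 1), so the expected number of
    # non-empty levels in a list of n keys is O(log n).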
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return the node holding `key` (or None) and the list of nodes that refer, or should refer, to it."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert('Key1', 3)
    skip_list.insert('Key2', 12)
    skip_list.insert('Key3', 41)
    skip_list.insert('Key4', -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert('Key1', 10)
    skip_list.insert('Key1', 12)
    skip_list.insert('Key5', 7)
    skip_list.insert('Key7', 10)
    skip_list.insert('Key10', 5)
    skip_list.insert('Key7', 7)
    skip_list.insert('Key5', 5)
    skip_list.insert('Key10', 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find('Some key') is None
def test_search():
    skip_list = SkipList()
    skip_list.insert('Key2', 20)
    assert skip_list.find('Key2') == 20
    skip_list.insert('Some Key', 10)
    skip_list.insert('Key2', 8)
    skip_list.insert('V', 13)
    assert skip_list.find('Y') is None
    assert skip_list.find('Key2') == 8
    assert skip_list.find('Some Key') == 10
    assert skip_list.find('V') == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete('Some key')
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('Key2') is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    assert skip_list.find('V') is None
    assert skip_list.find('X') == 14
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('X')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key1')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 142)
    skip_list.insert('Key2', 15)
    skip_list.delete('X')
    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main():
    skip_list = SkipList()
    skip_list.insert(2, '2')
    skip_list.insert(4, '4')
    skip_list.insert(6, '4')
    skip_list.insert(4, '5')
    skip_list.insert(8, '4')
    skip_list.insert(9, '4')
    skip_list.delete(4)
    print(skip_list)
| 47 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """A graph with transition probabilities for running a Markov chain."""
    def __init__(self):
        self.connections = {}
    def add_node(self, node: str) -> None:
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self) -> list[str]:
        return list(self.connections)
    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the Markov chain for `steps` transitions and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
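    # Usage sketch (added; the chain below is an illustrative assumption): a
    # two-state chain that strongly prefers staying in its current state.
    example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.1), ("b", "b", 0.9)]
    print(get_transitions("a", example_transitions, 1000))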
| 115 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : str = tempfile.mkdtemp()
# fmt: off
A : List[Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A : Optional[int] = dict(zip(lowerCamelCase__, range(len(lowerCamelCase__ ) ) ) )
A : Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A : Union[str, Any] = {"""unk_token""": """<unk>"""}
A : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
A : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase__ ) )
A : int = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A : List[Any] = os.path.join(self.tmpdirname, lowerCamelCase__ )
with open(self.image_processor_file, """w""", encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="""!""", **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="""!""", **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : str = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Optional[int] = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.get_tokenizer()
A : Optional[Any] = self.get_rust_tokenizer()
A : Optional[int] = self.get_image_processor()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase__ )
A : Tuple = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A : str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase__ )
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)
        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)
        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)
        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)
        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
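
# --- illustrative usage of the processor under test (editor's sketch, not part of
# the original test file). Uses the same checkpoint exercised above; `image` is
# any PIL image you supply:
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")
#   # -> dict with "input_ids", "attention_mask" and "pixel_values"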
| 115 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 59 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
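
# Illustrative check of the schedule above (editor's sketch, not part of the
# original file): with the default "cosine" transform the betas grow toward the
# end of the schedule and are capped at max_beta.
#
#   betas = betas_for_alpha_bar(10)
#   assert betas.shape == (10,)
#   assert 0.0 < float(betas.min()) and float(betas.max()) <= 0.999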
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or"
                    " `learned_range` for the UnCLIPScheduler."
                )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device/dtype as the samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
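
# Illustrative denoising loop (editor's sketch, not part of the original file).
# A real pipeline such as UnCLIP would replace the random tensor with a UNet call:
#
#   scheduler = UnCLIPScheduler()
#   scheduler.set_timesteps(5)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.randn(1, 3, 8, 8)  # stand-in for model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample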
| 65 | 0 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
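
# Editor's note (illustrative): DummyObject lets the import above succeed even
# when the optional backends are missing; the failure is deferred to use time:
#
#   scheduler = DPMSolverSDEScheduler()  # raises an error naming "torch" and
#                                        # "torchsde" when they are not installed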
| 365 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 140 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
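
# Editor's note (illustrative): `_LazyModule` resolves attribute access through
# `_import_structure`, so the names listed there must match the symbols imported
# under TYPE_CHECKING exactly. For example:
#
#   from transformers.models.nllb_moe import NllbMoeTop2Router
#
# only works because "NllbMoeTop2Router" appears in `_import_structure` and in
# the TYPE_CHECKING import with the same spelling.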
| 46 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 187 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name assumed; not recoverable from the obfuscated source

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)
        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 195 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
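
# --- illustrative usage (editor's sketch, not part of the original file) ---
#
#   from transformers import CTRLConfig
#   config = CTRLConfig()
#   # attribute_map lets the generic names resolve to the CTRL-specific ones:
#   assert config.hidden_size == config.n_embd == 1280
#   assert config.num_hidden_layers == config.n_layer == 48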
| 195 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 14 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
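
# --- illustrative usage (editor's sketch, not part of the original file) ---
#
#   from transformers import GitConfig
#   config = GitConfig()          # builds a default GitVisionConfig internally
#   assert config.vision_config.hidden_size == 768
#   assert config.to_dict()["model_type"] == "git"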
| 14 | 1 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # Classic two-pointer scan; assumes `nums` is sorted in ascending order.
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
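
# Editor's note: the scan above only works on sorted input. For unsorted input,
# a hash map gives the same answer in one pass (illustrative addition, not part
# of the original solution):
def two_sum_hash(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for idx, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []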
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 368 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 243 | 0 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
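
# Worked example (editor's note): 1406357289 is the pandigital number quoted in
# Project Euler problem 43; is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
# returns True, so it is one of the terms summed by solution().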
if __name__ == "__main__":
print(f'{solution() = }')
| 49 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
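
# Example invocation (editor's note; the script filename depends on where this
# file is saved, and the output path is a placeholder):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224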
| 143 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A:
'''simple docstring'''
def __init__( self : Any , A_ : Optional[Any] , A_ : Optional[Any]=13 , A_ : int=7 , A_ : int=True , A_ : int=True , A_ : str=True , A_ : int=True , A_ : Optional[Any]=99 , A_ : Union[str, Any]=32 , A_ : str=2 , A_ : int=4 , A_ : Any=37 , A_ : Union[str, Any]="gelu" , A_ : Union[str, Any]=0.1 , A_ : Any=0.1 , A_ : Dict=512 , A_ : str=16 , A_ : List[str]=2 , A_ : Dict=0.02 , A_ : Any=3 , A_ : str=4 , A_ : Tuple=None , ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 384
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = 'gelu'
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.02
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = 128
lowerCamelCase_ = 2
lowerCamelCase_ = 9
lowerCamelCase_ = 1
lowerCamelCase_ = None
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_saved_model_creation_extended( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , "use_cache" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , "saved_model" , "1" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , "key_length" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
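# --- Usage note (added; not part of the original tests) --------------------
# Assuming this file lives at tests/models/convbert/test_modeling_tf_convbert.py
# in a transformers checkout with TensorFlow installed, the suite runs with:
#
#   python -m pytest tests/models/convbert/test_modeling_tf_convbert.py
#
# Tests marked @slow are skipped by default; set RUN_SLOW=1 to include them.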
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase : Any = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowerCamelCase )
class RagConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "rag"
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ):
"""simple docstring"""
super().__init__(
bos_token_id=A_ , pad_token_id=A_ , eos_token_id=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , is_encoder_decoder=A_ , prefix=A_ , vocab_size=A_ , **A_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowerCamelCase_ = kwargs.pop('question_encoder' )
lowerCamelCase_ = question_encoder_config.pop('model_type' )
lowerCamelCase_ = kwargs.pop('generator' )
lowerCamelCase_ = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
lowerCamelCase_ = AutoConfig.for_model(A_ , **A_ )
lowerCamelCase_ = AutoConfig.for_model(A_ , **A_ )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , 'forced_eos_token_id' , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config: PretrainedConfig , generator_config: PretrainedConfig , **kwargs ) -> PretrainedConfig:
        """simple docstring"""
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
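# --- Usage sketch (added; not part of the original module) -----------------
# A minimal sketch of composing a RagConfig from two sub-configs; the DPR and
# BART checkpoint names are illustrative choices, not requirements:
#
#   from transformers import AutoConfig, RagConfig
#
#   question_encoder_config = AutoConfig.from_pretrained(
#       "facebook/dpr-question_encoder-single-nq-base"
#   )
#   generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5, index_name="compressed"
#   )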
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.next = None
class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None
    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
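# --- Usage sketch (added; not part of the original module) -----------------
# A small demonstration of the list above; names here are my own:
#
#   cll = CircularLinkedList()
#   for value in (1, 2, 3):
#       cll.insert_tail(value)
#   cll.insert_head(0)
#   print(cll)                 # 0->1->2->3
#   print(cll.delete_front())  # 0
#   print(len(cll))            # 3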
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """informer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }
    def __init__( self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = None , scaling = "mean" , num_dynamic_real_features = 0 , num_static_real_features = 0 , num_static_categorical_features = 0 , num_time_features = 0 , cardinality = None , embedding_dimension = None , d_model = 64 , encoder_ffn_dim = 32 , decoder_ffn_dim = 32 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , encoder_layers = 2 , decoder_layers = 2 , is_encoder_decoder = True , activation_function = "gelu" , dropout = 0.05 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , attention_dropout = 0.1 , activation_dropout = 0.1 , num_parallel_samples = 100 , init_std = 0.02 , use_cache=True , attention_type = "prob" , sampling_factor = 5 , distil = True , **kwargs , ):
"""simple docstring"""
A_ : str = prediction_length
A_ : List[Any] = context_length or prediction_length
A_ : str = distribution_output
A_ : Dict = loss
A_ : Any = input_size
A_ : Union[str, Any] = num_time_features
A_ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A_ : List[Any] = scaling
A_ : Tuple = num_dynamic_real_features
A_ : Any = num_static_real_features
A_ : str = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
A_ : Optional[int] = cardinality
else:
A_ : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
A_ : Any = embedding_dimension
else:
A_ : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : int = num_parallel_samples
# Transformer architecture configuration
A_ : str = input_size * len(self.lags_sequence ) + self._number_of_features
A_ : List[Any] = d_model
A_ : Dict = encoder_attention_heads
A_ : Dict = decoder_attention_heads
A_ : List[Any] = encoder_ffn_dim
A_ : Union[str, Any] = decoder_ffn_dim
A_ : int = encoder_layers
A_ : Any = decoder_layers
A_ : List[Any] = dropout
A_ : str = attention_dropout
A_ : Tuple = activation_dropout
A_ : List[str] = encoder_layerdrop
A_ : List[str] = decoder_layerdrop
A_ : str = activation_function
A_ : Optional[int] = init_std
A_ : List[Any] = use_cache
# Informer
A_ : Tuple = attention_type
A_ : List[Any] = sampling_factor
A_ : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ )
    @property
    def _number_of_features( self ) -> int:
        """simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
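# --- Usage sketch (added; not part of the original module) -----------------
# A minimal sketch of instantiating this config; the numbers are arbitrary
# illustrative choices, not recommended defaults:
#
#   config = InformerConfig(
#       prediction_length=24,
#       context_length=48,
#       num_time_features=2,
#       lags_sequence=[1, 2, 3, 7],
#   )
#   print(config.d_model, config.attention_type)  # 64 prob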
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000 , n_limit: int = 10 ) -> int:
    '''simple docstring'''
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10 )
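# --- Sanity check (added; not part of the original solution) ---------------
# A brute-force recount of square laminae for small tile limits, useful for
# spot-checking the parity/bounds logic above; the helper name is my own:
def _brute_force_lamina_counts(t_limit: int = 100 ) -> dict:
    counts: dict = {}
    # every lamina is an outer square of width `outer` with a centred hole of
    # width `hole`: same parity as `outer`, and 1 <= hole <= outer - 2
    for outer in range(3 , t_limit // 4 + 2 ):
        for hole in range(outer - 2 , 0 , -2 ):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:  # tiles only grow as the hole shrinks
                break
            counts[tiles] = counts.get(tiles , 0 ) + 1
    return counts
# e.g. sum(1 for v in _brute_force_lamina_counts(1000).values() if 1 <= v <= 10)
# should agree with solution(1000)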
if __name__ == "__main__":
print(f"{solution() = }")
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class EfficientFormerImageProcessor( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__(self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , crop_size: Dict[str, int] = None , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale(self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess(self , images: ImageInput , do_resize: Optional[bool] = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
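# --- Usage sketch (added; not part of the original module) -----------------
# A minimal sketch of running the processor on a random image. Note the class
# name above is my best reconstruction of the obfuscated original; the rest of
# the call pattern follows the standard image-processor API:
#
#   import numpy as np
#   processor = EfficientFormerImageProcessor()
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)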
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
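# --- Usage note (added; not part of the original module) -------------------
# With transformers installed, this entry point backs the `transformers-cli`
# executable; two illustrative invocations (the exact subcommand arguments
# depend on the installed version):
#
#   transformers-cli env
#   transformers-cli download gpt2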
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium(forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1 ) -> bool:
    '''simple docstring'''
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4 , 180 - 30 ),
            polar_force(879.54 , 45 ),
            polar_force(100 , -90 ),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]] )
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81 , 15 ),
            polar_force(215 , 180 - 45 ),
            polar_force(264 , 90 - 30 ),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]] )
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]] )
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]] )
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
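# --- Usage sketch (added; not part of the original module) -----------------
# A quick illustrative non-equilibrium case (my own numbers): two unequal
# vertical forces at different lever arms produce a net moment.
#
#   unbalanced_forces = array([[0, -100], [0, 50]])
#   points = array([[0, 0], [4, 0]])
#   print(in_static_equilibrium(unbalanced_forces, points))  # False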
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
UpperCAmelCase__ = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape , vocab_size , rng=None ):
    # create a random int32 tensor of the given shape with values in [0, vocab_size)
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask(shape , rng=None ):
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
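# --- Helper sanity check (added; not part of the original tests) -----------
# What the two helpers above produce, assuming numpy/jax are installed:
#
#   demo_ids = ids_tensor((2, 5), vocab_size=99)   # int32 array, shape (2, 5)
#   demo_mask = random_attention_mask((2, 5))      # 0/1 array, last column all 1
#   assert demo_ids.shape == (2, 5) and demo_mask[:, -1].all()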
@require_flax
class FlaxGenerationTesterMixin:
    '''simple docstring'''
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config( self ):
        """simple docstring"""
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids )
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config )
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers , pt_model_class_name )
            pt_model = pt_model_class(config ).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model , flax_model.params )
            flax_generation_outputs = flax_model.generate(input_ids ).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids , dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def test_greedy_generate( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_num_return_sequences( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def test_sample_generate_logits_warper( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_logits_warper( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_logits_warper( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_attn_mask( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0 )
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate_attn_mask( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0 )
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_attn_mask( self ):
        """simple docstring"""
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0 )
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def test_validate_generation_inputs( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        encoder_input_str = '''Hello world'''
        input_ids = tokenizer(encoder_input_str , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , '''do_samples''' ):
            model.generate(input_ids , do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , '''foo''' ):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids , **fake_model_kwargs )
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''<pad>''' )
        self.assertEqual(len(vocab_keys ) , 1_002 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer( self ):
        """simple docstring"""
        return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        """simple docstring"""
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [18_536, 2_260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        """simple docstring"""
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model( self ):
        """simple docstring"""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
        # fmt: off
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule( BaseTransformer ):
    """simple docstring"""
    mode = """summarization"""
    loss_names = ["""loss"""]
    metric_names = ROUGE_KEYS
    default_val_metric = """rouge2"""
    def __init__( self , hparams , **kwargs ):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
        super().__init__(hparams , num_labels=None , mode=self.mode , **kwargs )
        use_task_specific_params(self.model , '''summarization''' )
        save_git_info(self.hparams.output_dir )
        self.metrics_save_path = Path(self.output_dir ) / '''metrics.json'''
        self.hparams_save_path = Path(self.output_dir ) / '''hparams.pkl'''
        pickle_save(self.hparams , self.hparams_save_path )
        self.step_count = 0
        self.metrics = defaultdict(list )
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        self.hparams.git_sha = get_git_info()['''repo_sha''']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , MBartTokenizer ):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch( self , batch: Dict[str, torch.Tensor] ) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch , Path(self.output_dir ) / '''text_batch.json''' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
        self.already_saved_batch = True
        return readable_batch
    def forward( self , input_ids , **kwargs ):
        return self.model(input_ids , **kwargs )
    def ids_to_clean_text( self , generated_ids: List[int] ):
        gen_text = self.tokenizer.batch_decode(
            generated_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True )
        return lmap(str.strip , gen_text )
    def _step( self , batch: dict ) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch['''input_ids'''], batch['''attention_mask''']
        tgt_ids = batch['''labels''']
        if isinstance(self.model , T5ForConditionalGeneration ):
            decoder_input_ids = self.model._shift_right(tgt_ids )
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids , pad_token_id )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch['''decoder_input_ids'''] = decoder_input_ids
            self.save_readable_batch(batch )
        outputs = self(src_ids , attention_mask=src_mask , decoder_input_ids=decoder_input_ids , use_cache=False )
        lm_logits = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id )
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            lprobs = nn.functional.log_softmax(lm_logits , dim=-1 )
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs , tgt_ids , self.hparams.label_smoothing , ignore_index=pad_token_id )
        return (loss,)
    @property
    def pad( self ) -> int:
        return self.tokenizer.pad_token_id
    def training_step( self , batch , batch_idx ) -> Dict:
        loss_tensors = self._step(batch )
        logs = dict(zip(self.loss_names , loss_tensors ) )
        # tokens per batch
        logs['''tpb'''] = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
        logs['''bs'''] = batch['''input_ids'''].shape[0]
        logs['''src_pad_tok'''] = batch['''input_ids'''].eq(self.pad ).sum()
        logs['''src_pad_frac'''] = batch['''input_ids'''].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step( self , batch , batch_idx ) -> Dict:
        return self._generative_step(batch )
    def validation_epoch_end( self , outputs , prefix="val" ) -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        loss = losses['''loss''']
        generative_metrics = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val ).type_as(loss )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(generative_metrics )
        all_metrics = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
        all_metrics['''step_count'''] = self.step_count
        self.metrics[prefix].append(all_metrics )  # callback writes this to self.metrics_save_path
        preds = flatten_list([x['''preds'''] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            f'{prefix}_loss': loss,
            f'{prefix}_{self.val_metric}': metric_tensor,
        }
def lowercase__ ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict )->Dict:
return calculate_rouge(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Tuple , __UpperCamelCase : dict )->dict:
_UpperCAmelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_UpperCAmelCase = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=__UpperCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_UpperCAmelCase = (time.time() - ta) / batch['''input_ids'''].shape[0]
_UpperCAmelCase = self.ids_to_clean_text(__UpperCamelCase )
_UpperCAmelCase = self.ids_to_clean_text(batch['''labels'''] )
_UpperCAmelCase = self._step(__UpperCamelCase )
_UpperCAmelCase = dict(zip(self.loss_names , __UpperCamelCase ) )
_UpperCAmelCase = self.calc_generative_metrics(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.mean(lmap(__UpperCamelCase , __UpperCamelCase ) )
base_metrics.update(gen_time=__UpperCamelCase , gen_len=__UpperCamelCase , preds=__UpperCamelCase , target=__UpperCamelCase , **__UpperCamelCase )
return base_metrics
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int )->Optional[Any]:
return self._generative_step(__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : Optional[Any] )->Optional[int]:
return self.validation_epoch_end(__UpperCamelCase , prefix='''test''' )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[Any] )->SeqaSeqDataset:
_UpperCAmelCase = self.n_obs[type_path]
_UpperCAmelCase = self.target_lens[type_path]
_UpperCAmelCase = self.dataset_class(
self.tokenizer , type_path=__UpperCamelCase , n_obs=__UpperCamelCase , max_target_length=__UpperCamelCase , **self.dataset_kwargs , )
return dataset
def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : bool = False )->DataLoader:
_UpperCAmelCase = self.get_dataset(__UpperCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_UpperCAmelCase = dataset.make_sortish_sampler(__UpperCamelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
__UpperCamelCase , batch_size=__UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=__UpperCamelCase , num_workers=self.num_workers , sampler=__UpperCamelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_UpperCAmelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__UpperCamelCase , batch_sampler=__UpperCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__UpperCamelCase , batch_size=__UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=__UpperCamelCase , num_workers=self.num_workers , sampler=__UpperCamelCase , )
def lowercase__ ( self : int )->DataLoader:
_UpperCAmelCase = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=__UpperCamelCase )
return dataloader
def lowercase__ ( self : List[str] )->DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def lowercase__ ( self : Any )->DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowercase__ ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] )->Dict:
BaseTransformer.add_model_specific_args(__UpperCamelCase , __UpperCamelCase )
add_generic_args(__UpperCamelCase , __UpperCamelCase )
parser.add_argument(
'''--max_source_length''' , default=1_0_2_4 , type=__UpperCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
        parser.add_argument(
            '''--max_target_length''' , default=5_6 , type=__UpperCamelCase , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--val_max_target_length''' , default=1_4_2 , type=__UpperCamelCase , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--test_max_target_length''' , default=1_4_2 , type=__UpperCamelCase , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=__UpperCamelCase )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=__UpperCamelCase )
parser.add_argument('''--max_tokens_per_batch''' , type=__UpperCamelCase , default=__UpperCamelCase )
parser.add_argument('''--logger_name''' , type=__UpperCamelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=__UpperCamelCase , default=5_0_0 , required=__UpperCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' , type=__UpperCamelCase , default='''summarization''' , required=__UpperCamelCase , help='''Task to fine-tune: summarization or translation.''' )
parser.add_argument('''--label_smoothing''' , type=__UpperCamelCase , default=0.0 , required=__UpperCamelCase )
parser.add_argument('''--src_lang''' , type=__UpperCamelCase , default='''''' , required=__UpperCamelCase )
parser.add_argument('''--tgt_lang''' , type=__UpperCamelCase , default='''''' , required=__UpperCamelCase )
parser.add_argument('''--eval_beams''' , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase )
parser.add_argument(
'''--val_metric''' , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=__UpperCamelCase , default=__UpperCamelCase , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=__UpperCamelCase , default=1 , required=__UpperCamelCase , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will affect it.'''
) , )
return parser
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """translation"""
UpperCamelCase__ = ["""loss"""]
UpperCamelCase__ = ["""bleu"""]
UpperCamelCase__ = """bleu"""
def __init__( self : List[Any] , __UpperCamelCase : str , **__UpperCamelCase : str )->Tuple:
super().__init__(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = hparams.src_lang
_UpperCAmelCase = hparams.tgt_lang
def lowercase__ ( self : str , __UpperCamelCase : int , __UpperCamelCase : int )->dict:
return calculate_bleu(__UpperCamelCase , __UpperCamelCase )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
check_output_dir(_SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
_UpperCAmelCase = SummarizationModule(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = TranslationModule(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
_UpperCAmelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_UpperCAmelCase = os.environ.get('''WANDB_PROJECT''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=_SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
if args.early_stopping_patience >= 0:
_UpperCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
_UpperCAmelCase = False
_UpperCAmelCase = args.val_metric == '''loss'''
_UpperCAmelCase = generic_train(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _SCREAMING_SNAKE_CASE ) , early_stopping_callback=_SCREAMING_SNAKE_CASE , logger=_SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
_UpperCAmelCase = ''''''
_UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_SCREAMING_SNAKE_CASE ) )
if checkpoints:
_UpperCAmelCase = checkpoints[-1]
_UpperCAmelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
__A : str = pl.Trainer.add_argparse_args(parser)
__A : Dict = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__A : List[str] = parser.parse_args()
main(args)
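# --- Hedged example (added for illustration, not part of the original file) ---
# The `_step` method above builds decoder inputs from the labels via a
# `shift_tokens_right` helper. A minimal self-contained sketch of that helper,
# assuming the BART-style convention (shift everything one position right and
# put the last non-pad token, usually EOS, in front); the function name and
# token ids below are illustrative:
import torch

def shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    """Shift label ids one position to the right to form decoder_input_ids."""
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in each row
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = input_ids[:, :-1]
    return prev_output_tokens

labels = torch.tensor([[5, 6, 7, 2, 1, 1]])  # assume 2 = eos, 1 = pad
print(shift_tokens_right_sketch(labels, pad_token_id=1))  # tensor([[2, 5, 6, 7, 2, 1]])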
| 260 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = (DPMSolverSinglestepScheduler,)
UpperCamelCase__ = (("""num_inference_steps""", 25),)
def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any:
_UpperCAmelCase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**__UpperCamelCase )
return config
def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self : Any )->Union[str, Any]:
pass
def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict:
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]:
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 1_0
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def lowercase__ ( self : List[Any] )->Dict:
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = 5_0
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3
def lowercase__ ( self : Dict )->Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowercase__ ( self : str )->Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def lowercase__ ( self : Union[str, Any] )->int:
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def lowercase__ ( self : str )->str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Tuple:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def lowercase__ ( self : Dict )->List[str]:
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def lowercase__ ( self : Dict )->str:
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowercase__ ( self : List[str] )->int:
self.check_over_configs(variance_type=__UpperCamelCase )
self.check_over_configs(variance_type='''learned_range''' )
def lowercase__ ( self : List[str] )->Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def lowercase__ ( self : List[str] )->List[str]:
_UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3
def lowercase__ ( self : int )->List[Any]:
_UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3
def lowercase__ ( self : Optional[Any] )->Dict:
_UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 1_0
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
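# --- Hedged example (added for illustration, not part of the original tests) ---
# A minimal sketch of the save/load round-trip pattern the tests above exercise:
# serialise the scheduler config to a temporary directory, rebuild the scheduler,
# and check that both instances step identically. The class comes from diffusers;
# the sample/residual tensors are made up for illustration.
import tempfile
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)
reloaded.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
residual = 0.1 * sample  # stands in for a model output
t = scheduler.timesteps[0]
out_a = scheduler.step(residual, t, sample).prev_sample
out_b = reloaded.step(residual, t, sample).prev_sample
assert torch.sum(torch.abs(out_a - out_b)) < 1e-5, "Scheduler outputs are not identical"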
| 260 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a : int = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , *A , **A ) -> None:
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , A , )
super().__init__(*A , **A )
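# --- Hedged example (added for illustration, not part of the original file) ---
# The class above is an instance of a common deprecation pattern: keep the old
# name importable, warn once on construction, and delegate everything to the
# replacement. A generic sketch of the same idea (class names are illustrative):
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)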
| 338 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a : int = logging.get_logger(__name__)
a : int = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
a : Tuple = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
a : Optional[int] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'whisper'
lowercase = ['past_key_values']
lowercase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , A=51865 , A=80 , A=6 , A=4 , A=6 , A=4 , A=1536 , A=1536 , A=0.0 , A=0.0 , A=50257 , A=True , A=True , A="gelu" , A=256 , A=0.0 , A=0.0 , A=0.0 , A=0.0_2 , A=False , A=1500 , A=448 , A=50256 , A=50256 , A=50256 , A=None , A=[220, 50256] , A=False , A=256 , A=False , A=0.0_5 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A=7 , **A , ) -> Optional[Any]:
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Union[str, Any] = num_mel_bins
UpperCAmelCase : Tuple = d_model
UpperCAmelCase : Optional[int] = encoder_layers
UpperCAmelCase : List[str] = encoder_attention_heads
UpperCAmelCase : Optional[int] = decoder_layers
UpperCAmelCase : int = decoder_attention_heads
UpperCAmelCase : Optional[int] = decoder_ffn_dim
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : List[str] = dropout
UpperCAmelCase : Optional[Any] = attention_dropout
UpperCAmelCase : Optional[Any] = activation_dropout
UpperCAmelCase : Optional[Any] = activation_function
UpperCAmelCase : Optional[Any] = init_std
UpperCAmelCase : int = encoder_layerdrop
UpperCAmelCase : Dict = decoder_layerdrop
UpperCAmelCase : Optional[int] = use_cache
UpperCAmelCase : List[str] = encoder_layers
UpperCAmelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Union[str, Any] = max_source_positions
UpperCAmelCase : Tuple = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : List[str] = classifier_proj_size
UpperCAmelCase : Optional[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : int = mask_time_prob
UpperCAmelCase : int = mask_time_length
UpperCAmelCase : Dict = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Optional[int] = mask_feature_length
UpperCAmelCase : int = mask_feature_min_masks
UpperCAmelCase : List[Any] = median_filter_width
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , suppress_tokens=A , begin_suppress_tokens=A , **A , )
class UpperCamelCase_ ( __magic_name__ ):
@property
def _lowercase( self ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : str = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase : List[Any] = {0: """batch"""}
else:
UpperCAmelCase : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(A , direction="""inputs""" )
return common_inputs
def _lowercase( self , A , A = -1 , A = -1 , A = False , A = None , A = 22050 , A = 5.0 , A = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Optional[int] = OrderedDict()
UpperCAmelCase : Any = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=A , framework=A , sampling_rate=A , time_duration=A , frequency=A , )
UpperCAmelCase : List[str] = encoder_inputs["""input_features"""].shape[2]
UpperCAmelCase : List[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Any = super().generate_dummy_inputs(
preprocessor.tokenizer , A , A , A , A )
UpperCAmelCase : List[str] = encoder_inputs.pop("""input_features""" )
UpperCAmelCase : Any = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _lowercase( self ) -> float:
return 1e-3
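# --- Hedged example (added for illustration, not part of the original file) ---
# A small sketch of the shapes the ONNX config above works with: Whisper's
# encoder consumes log-mel features of shape (batch, num_mel_bins, frames),
# and the dummy-input logic halves the frame count for the decoder sequence
# when past key values are used. The 3000-frame value (~30s of audio) is an
# assumption for illustration.
import torch
from transformers import WhisperConfig

config = WhisperConfig()  # defaults: vocab_size=51865, num_mel_bins=80, ...
input_features = torch.zeros(1, config.num_mel_bins, 3000)
encoder_sequence_length = input_features.shape[2]
use_past = True
seq_length = encoder_sequence_length // 2 if use_past else encoder_sequence_length
print(config.num_mel_bins, encoder_sequence_length, seq_length)  # 80 3000 1500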
| 338 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
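# --- Hedged example (added for illustration, not part of the original file) ---
# The names re-exported above are mostly pytest decorators. A minimal sketch of
# how a downstream test module would consume them, assuming the package path
# `accelerate.test_utils` (the test bodies are illustrative):
import torch
from accelerate.test_utils import require_cuda, slow

@require_cuda
def test_runs_on_gpu():
    assert torch.cuda.is_available()

@slow
def test_long_running():
    pass  # a test that only runs when slow tests are enabled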
| 307 |
from math import isclose, sqrt
def a_ ( _A , _A , _A ) -> tuple[float, float, float]:
"""simple docstring"""
snake_case__ = point_y / 4 / point_x
snake_case__ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
snake_case__ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
snake_case__ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
snake_case__ = outgoing_gradient**2 + 4
snake_case__ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
snake_case__ = (point_y - outgoing_gradient * point_x) ** 2 - 100
snake_case__ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
snake_case__ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
snake_case__ = x_minus if isclose(_A , _A ) else x_plus
snake_case__ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a_ ( _A = 1.4 , _A = -9.6 ) -> int:
"""simple docstring"""
snake_case__ = 0
snake_case__ = first_x_coord
snake_case__ = first_y_coord
snake_case__ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
snake_case__ , snake_case__ , snake_case__ = next_point(_A , _A , _A )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
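# --- Hedged example (added for illustration, not part of the original file) ---
# A quick sanity check of the geometry above: every impact point must lie on
# the ellipse 4x^2 + y^2 = 100, and the beam escapes through the gap at the
# top (|x| <= 0.01, y > 0). The first test point below is the default first
# impact point; the second is the top of the ellipse where the gap sits:
from math import isclose

def on_ellipse(x: float, y: float) -> bool:
    return isclose(4 * x * x + y * y, 100.0)

assert on_ellipse(1.4, -9.6)  # the first impact point
assert on_ellipse(0.0, 10.0)  # the top of the ellipse, where the gap sits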
| 307 | 1 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : str = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Optional[Any] ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : List[str] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : int ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : str ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Dict , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Optional[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : List[str] , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : Tuple , lowercase : Tuple ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( lowercase : Optional[int] , lowercase : Optional[int] ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , )
        parser.add_argument(
            '--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this; defaults to 0, meaning CPU only' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> List[str]:
_snake_case = argparse.ArgumentParser()
add_generic_args(__lowercase , os.getcwd() )
_snake_case = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() )
_snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_snake_case = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_snake_case = GLUETransformer(__lowercase )
_snake_case = generic_train(__lowercase , __lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__lowercase ) )
_snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowercase )
if __name__ == "__main__":
main()
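# --- Hedged example (added for illustration, not part of the original file) ---
# The data-preparation method above follows a cache-or-compute pattern for GLUE
# features: reuse a cached tensor file unless --overwrite_cache is set. A
# stripped-down sketch of the same idea (the cache path and feature builder
# are illustrative):
import os
import torch

def load_or_build_features(cache_path: str, build_fn, overwrite: bool = False):
    if os.path.exists(cache_path) and not overwrite:
        return torch.load(cache_path)  # reuse previously tokenized features
    features = build_fn()              # the expensive tokenization step
    torch.save(features, cache_path)
    return features

feats = load_or_build_features(
    "/tmp/glue_train_features.pt",
    lambda: {"input_ids": torch.zeros(2, 8, dtype=torch.long)},
)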
| 130 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a_ ( __lowercase : np.ndarray , __lowercase : np.ndarray , __lowercase : np.ndarray , __lowercase : int , __lowercase : int ) -> np.ndarray:
_snake_case = cva.getAffineTransform(__lowercase , __lowercase )
return cva.warpAffine(__lowercase , __lowercase , (rows, cols) )
if __name__ == "__main__":
# read original image
_lowerCamelCase : Optional[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
_lowerCamelCase : List[str] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
_lowerCamelCase , _lowerCamelCase : List[Any] = gray_img.shape
# set different points to rotate image
_lowerCamelCase : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
_lowerCamelCase : Optional[Any] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
_lowerCamelCase : List[str] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
_lowerCamelCase : Dict = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
_lowerCamelCase : int = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
_lowerCamelCase : Any = plt.figure(1)
_lowerCamelCase : List[Any] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
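# --- Hedged example (added for illustration, not part of the original file) ---
# cva.getAffineTransform solves for the 2x3 matrix M with dst = M @ [x, y, 1]^T
# from three point correspondences. The same matrix can be recovered with plain
# NumPy, which makes the geometry explicit; the points are the first src/dst
# pair used in the script above:
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)

# Build the 6x6 linear system for the six unknowns of M.
A = np.zeros((6, 6))
b = dst.reshape(-1).astype(np.float64)
for i, (x, y) in enumerate(src):
    A[2 * i] = [x, y, 1, 0, 0, 0]
    A[2 * i + 1] = [0, 0, 0, x, y, 1]
M = np.linalg.solve(A, b).reshape(2, 3)
print(M)  # should match cva.getAffineTransform(src, dst)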
| 130 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
snake_case_ : str = logging.get_logger(__name__)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
UpperCAmelCase_ : int = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Union[str, Any] = downstream_dict['''projector.weight''']
UpperCAmelCase_ : List[Any] = downstream_dict['''projector.bias''']
UpperCAmelCase_ : Tuple = downstream_dict['''model.post_net.linear.weight''']
UpperCAmelCase_ : Optional[int] = downstream_dict['''model.post_net.linear.bias''']
return model
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
UpperCAmelCase_ : str = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = downstream_dict['''model.linear.weight''']
UpperCAmelCase_ : Union[str, Any] = downstream_dict['''model.linear.bias''']
return model
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
UpperCAmelCase_ : Optional[Any] = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Union[str, Any] = downstream_dict['''connector.weight''']
UpperCAmelCase_ : Union[str, Any] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCAmelCase_ : Tuple = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
UpperCAmelCase_ : Any = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
UpperCAmelCase_ : Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
UpperCAmelCase_ : Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
UpperCAmelCase_ : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
UpperCAmelCase_ : List[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
UpperCAmelCase_ : Union[str, Any] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
UpperCAmelCase_ : str = torch.load(SCREAMING_SNAKE_CASE__, map_location='''cpu''' )
UpperCAmelCase_ : Optional[Any] = checkpoint['''Downstream''']
UpperCAmelCase_ : Union[str, Any] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE__, return_attention_mask=SCREAMING_SNAKE_CASE__, do_normalize=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
UpperCAmelCase_ : int = convert_classification(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith('''ForAudioFrameClassification''' ):
UpperCAmelCase_ : Dict = convert_diarization(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith('''ForXVector''' ):
UpperCAmelCase_ : Optional[int] = convert_xvector(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
UpperCAmelCase_ : str = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
snake_case_ : Optional[Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
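# --- Hedged example (added for illustration, not part of the original file) ---
# Every conversion function above follows one pattern: read tensors out of the
# S3PRL downstream state dict and assign them onto the `.data` of the matching
# HF parameters. A generic sketch of that pattern (module and key names are
# illustrative):
import torch
import torch.nn as nn

def copy_linear(layer: nn.Linear, state: dict, prefix: str) -> None:
    # assign in place so the existing Parameter objects are kept
    layer.weight.data = state[f"{prefix}.weight"]
    layer.bias.data = state[f"{prefix}.bias"]

head = nn.Linear(4, 2)
downstream = {
    "model.linear.weight": torch.randn(2, 4),
    "model.linear.bias": torch.randn(2),
}
copy_linear(head, downstream, "model.linear")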
| 125 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
UpperCAmelCase_ : List[Any] = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[Any] = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
UpperCAmelCase_ : str = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
UpperCAmelCase_ : Dict = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
UpperCAmelCase_ : Any = new_key.replace(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
UpperCAmelCase_ : int = new_key.replace(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
UpperCAmelCase_ : Any = re.sub(R'''layers_(\d+)''', R'''layer.\1''', SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = new_key.replace('''encoder''', '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
UpperCAmelCase_ : str = re.sub(R'''layers_(\d+)''', R'''layer.\1''', SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Dict = flax_dict[key]
UpperCAmelCase_ : List[str] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
UpperCAmelCase_ : Dict = torch.from_numpy(converted_dict[key].T )
else:
UpperCAmelCase_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : List[str]=False, SCREAMING_SNAKE_CASE__ : Dict=False ) -> int:
UpperCAmelCase_ : Optional[Any] = get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
UpperCAmelCase_ : List[str] = PixaStructVisionConfig()
UpperCAmelCase_ : List[str] = PixaStructTextConfig()
else:
UpperCAmelCase_ : Dict = PixaStructVisionConfig(
hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18 )
UpperCAmelCase_ : Optional[int] = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18 )
UpperCAmelCase_ : List[Any] = PixaStructConfig(
vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Union[str, Any] = rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
UpperCAmelCase_ : Tuple = PixaStructImageProcessor()
UpperCAmelCase_ : Optional[int] = PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
UpperCAmelCase_ : Union[str, Any] = 4096
UpperCAmelCase_ : Union[str, Any] = True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__, exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('''Model saved in {}'''.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
snake_case_ : Optional[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
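# --- Hedged example (added for illustration, not part of the original file) ---
# The core of the conversion above is string surgery on checkpoint keys: plain
# substring replacement from a mapping table plus a regex that rewrites
# `layers_<n>` as `layer.<n>`. A self-contained sketch of just that step
# (the mapping entries are a subset chosen for illustration):
import re

MAPPING = {"kernel": "weight", "encoder_norm": "layernorm"}

def rename_key(key: str) -> str:
    for old, new in MAPPING.items():
        key = key.replace(old, new)
    return re.sub(r"layers_(\d+)", r"layer.\1", key)

assert rename_key("encoder.layers_3.attention.kernel") == "encoder.layer.3.attention.weight"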
| 125 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCAmelCase = getLogger(__name__)
def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int = 8 , snake_case__ : int = 1024 , snake_case__ : Any="val" , snake_case__ : int=None , snake_case__ : Optional[Any]=False , snake_case__ : Any="summarization" , snake_case__ : Dict=None , snake_case__ : List[Any]=1 , snake_case__ : Dict = None , snake_case__ : Optional[Any]="" , **snake_case__ : List[str] , ) -> Dict:
UpperCamelCase : Optional[Any] = str(snake_case__ )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=snake_case__ )
UpperCamelCase : Union[str, Any] = Path(snake_case__ )
UpperCamelCase : Optional[int] = save_dir.joinpath(F"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(snake_case__ )
UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(snake_case__ ).cuda()
if fpaa:
UpperCamelCase : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case__ , snake_case__ ) # update config with task specific params
UpperCamelCase : Dict = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCamelCase : int = num_return_sequences
UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(snake_case__ )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCamelCase : Any = tokenizer.model_max_length
if prefix is None:
UpperCamelCase : Optional[int] = prefix or getattr(model.config , 'prefix' , '' ) or ''
UpperCamelCase : Dict = SeqaSeqDataset(
snake_case__ , snake_case__ , snake_case__ , max_target_length=1024 , type_path=snake_case__ , n_obs=snake_case__ , prefix=snake_case__ , **snake_case__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCamelCase : str = ds.make_sortish_sampler(snake_case__ , distributed=snake_case__ , add_extra_examples=snake_case__ , shuffle=snake_case__ )
UpperCamelCase : Optional[Any] = DataLoader(snake_case__ , sampler=snake_case__ , batch_size=snake_case__ , collate_fn=ds.collate_fn )
UpperCamelCase : Optional[Any] = []
for batch in tqdm(snake_case__ ):
UpperCamelCase : Tuple = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=snake_case__ , num_beams=snake_case__ , **snake_case__ , )
UpperCamelCase : Tuple = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
UpperCamelCase : List[str] = batch['ids']
if num_return_sequences > 1:
UpperCamelCase : Any = chunks(snake_case__ , snake_case__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case__ ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(snake_case__ , snake_case__ )
return results, sampler.num_replicas
def UpperCamelCase ( ) -> Optional[Any]:
UpperCamelCase : List[str] = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=snake_case__ , help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' , type=snake_case__ , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=snake_case__ , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=snake_case__ , default=snake_case__ )
parser.add_argument(
'--type_path' , type=snake_case__ , default='test' , help='which subset to evaluate typically train/val/test' )
parser.add_argument('--task' , type=snake_case__ , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=snake_case__ , default=8 , required=snake_case__ , help='batch size' )
parser.add_argument(
'--local_rank' , type=snake_case__ , default=-1 , required=snake_case__ , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=snake_case__ , default=1 , required=snake_case__ , help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' , type=snake_case__ , default=600 , required=snake_case__ , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=snake_case__ , default=snake_case__ , required=snake_case__ )
parser.add_argument('--tgt_lang' , type=snake_case__ , default=snake_case__ , required=snake_case__ )
    parser.add_argument(
        '--prefix' , type=snake_case__ , required=snake_case__ , default=snake_case__ , help='will be added to the beginning of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
UpperCamelCase : str = time.time()
UpperCamelCase : str = parser.parse_known_args()
UpperCamelCase : str = parse_numeric_n_bool_cl_kwargs(snake_case__ )
if generate_kwargs and args.local_rank <= 0:
print(F"""parsed the following generate kwargs: {generate_kwargs}""" )
UpperCamelCase : List[Any] = Path(args.save_dir + '_tmp' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) # this handles locking.
UpperCamelCase : List[Any] = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(F"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCamelCase : Union[str, Any] = {}
if args.src_lang is not None:
UpperCamelCase : Any = args.src_lang
if args.tgt_lang is not None:
UpperCamelCase : str = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case__ )
UpperCamelCase : List[str] = eval_data_dir(
args.data_dir , snake_case__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=snake_case__ , **snake_case__ , )
if args.local_rank <= 0:
UpperCamelCase : Tuple = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case__ )
UpperCamelCase : Tuple = gather_results_from_each_node(snake_case__ , snake_case__ , args.sync_timeout )
UpperCamelCase : Tuple = combine_partial_results(snake_case__ )
if args.num_return_sequences > 1:
UpperCamelCase : Tuple = save_dir.joinpath('pseudolabel_results.json' )
print(F"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(snake_case__ , snake_case__ )
return
UpperCamelCase : Tuple = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(snake_case__ ) as f:
UpperCamelCase : Any = [x.rstrip() for x in f.readlines()][: len(snake_case__ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCamelCase : Tuple = 'translation' in args.task
UpperCamelCase : Any = calculate_bleu if calc_bleu else calculate_rouge
UpperCamelCase : Tuple = 'bleu' if calc_bleu else 'rouge'
UpperCamelCase : Dict = score_fn(snake_case__ , snake_case__ )
UpperCamelCase : Optional[Any] = len(snake_case__ )
UpperCamelCase : str = time.time() - start_time
UpperCamelCase : Union[str, Any] = round(runtime / metrics['n_obs'] , 4 )
UpperCamelCase : Any = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCamelCase : Tuple = save_dir.joinpath(F"""{args.type_path}_{metric_name}.json""" )
save_json(snake_case__ , snake_case__ , indent=snake_case__ )
print(snake_case__ )
write_txt_file(snake_case__ , save_dir.joinpath(F"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(snake_case__ , save_dir.joinpath(F"""{args.type_path}.target""" ) )
else:
shutil.rmtree(snake_case__ )
def UpperCamelCase ( snake_case__ : List[Any] ) -> List:
UpperCamelCase : Optional[int] = []
for partial_result in partial_results:
records.extend(snake_case__ )
    UpperCamelCase : List[str] = sorted(snake_case__ , key=lambda x : x["id"] )
UpperCamelCase : Dict = [x['pred'] for x in records]
return preds
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : int , snake_case__ : Union[str, Any] ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
UpperCamelCase : List[Any] = time.time()
logger.info('waiting for all nodes to finish' )
UpperCamelCase : Dict = None
while (time.time() - start_wait) < timeout:
UpperCamelCase : str = list(save_dir.glob('rank_*.json' ) )
if len(snake_case__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCamelCase : Optional[Any] = lmap(snake_case__ , snake_case__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 356 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def UpperCamelCase ( snake_case__ : List[Any] ) -> Dict:
from transformers.testing_utils import pytest_terminal_summary_main
UpperCamelCase : Optional[int] = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
| 103 | 0 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class A_ :
def lowercase ( self : str , snake_case_ : int ):
raise NotImplementedError()
def lowercase ( self : Any ):
raise NotImplementedError()
class A_ ( lowerCAmelCase_ ):
def __init__( self : str , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , **snake_case_ : Tuple ):
_UpperCAmelCase = tokenizer
_UpperCAmelCase = skip_prompt
_UpperCAmelCase = decode_kwargs
# variables used in the streaming process
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = True
def lowercase ( self : Tuple , snake_case_ : List[str] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
_UpperCAmelCase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_UpperCAmelCase = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
_UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
_UpperCAmelCase = text[self.print_len :]
_UpperCAmelCase = []
_UpperCAmelCase = 0
# If the last token is a CJK character, we print the characters.
elif len(snake_case_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_UpperCAmelCase = text[self.print_len :]
self.print_len += len(snake_case_ )
        # Otherwise, we print up to the last space char (a simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_UpperCAmelCase = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(snake_case_ )
self.on_finalized_text(snake_case_ )
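        # A short walk-through of the heuristic above (illustrative): if the cache decodes
        # to "Hello wor", only "Hello " (up to the last space) is emitted and print_len
        # advances past it; once a later token makes the cache decode to "Hello world\n",
        # the remaining "world\n" is flushed and the cache is reset.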
def lowercase ( self : Optional[int] ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_UpperCAmelCase = text[self.print_len :]
_UpperCAmelCase = []
_UpperCAmelCase = 0
else:
_UpperCAmelCase = ""
_UpperCAmelCase = True
self.on_finalized_text(snake_case_ , stream_end=snake_case_ )
def lowercase ( self : Any , snake_case_ : str , snake_case_ : bool = False ):
print(snake_case_ , flush=snake_case_ , end="" if not stream_end else None )
def lowercase ( self : Optional[Any] , snake_case_ : Tuple ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
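        # Illustrative examples against the ranges below: ord("中") == 0x4E2D falls in the
        # 0x4E00-0x9FFF block and counts as CJK, while ord("a") == 0x61 (Basic Latin) and
        # ord("한") == 0xD55C (Hangul Syllables) fall outside every range and do not.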
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class A_ ( lowerCAmelCase_ ):
def __init__( self : List[Any] , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , snake_case_ : Optional[float] = None , **snake_case_ : Union[str, Any] ):
super().__init__(snake_case_ , snake_case_ , **snake_case_ )
_UpperCAmelCase = Queue()
_UpperCAmelCase = None
_UpperCAmelCase = timeout
def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : bool = False ):
self.text_queue.put(snake_case_ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : str ):
return self
def lowercase ( self : Any ):
_UpperCAmelCase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 22 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = """pixel_values"""
lowercase__ = False
lowercase__ = TimmBackboneConfig
def __init__( self : Tuple, lowerCamelCase : List[str], **lowerCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self, '''timm''' )
super().__init__(lowerCamelCase )
lowercase__ = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase, '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
lowercase__ = getattr(lowerCamelCase, '''use_pretrained_backbone''', lowerCamelCase )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
lowercase__ = config.out_indices if getattr(lowerCamelCase, '''out_indices''', lowerCamelCase ) is not None else (-1,)
lowercase__ = timm.create_model(
config.backbone, pretrained=lowerCamelCase, features_only=config.features_only, in_chans=config.num_channels, out_indices=lowerCamelCase, **lowerCamelCase, )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowercase__ = self._backbone.return_layers
lowercase__ = {layer['''module''']: str(lowerCamelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase )
@classmethod
def lowercase__ ( cls : List[str], lowerCamelCase : List[str], *lowerCamelCase : Optional[int], **lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls, ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
lowercase__ = kwargs.pop('''config''', TimmBackboneConfig() )
lowercase__ = kwargs.pop('''use_timm_backbone''', lowerCamelCase )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
lowercase__ = kwargs.pop('''num_channels''', config.num_channels )
lowercase__ = kwargs.pop('''features_only''', config.features_only )
lowercase__ = kwargs.pop('''use_pretrained_backbone''', config.use_pretrained_backbone )
lowercase__ = kwargs.pop('''out_indices''', config.out_indices )
lowercase__ = TimmBackboneConfig(
backbone=lowerCamelCase, num_channels=lowerCamelCase, features_only=lowerCamelCase, use_pretrained_backbone=lowerCamelCase, out_indices=lowerCamelCase, )
return super()._from_config(lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : List[Any], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
pass
def lowercase__ ( self : int, lowerCamelCase : int, lowerCamelCase : Optional[int]=None, lowerCamelCase : List[Any]=None, lowerCamelCase : int=None, **lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowercase__ = self._all_layers
lowercase__ = self._backbone(lowerCamelCase, **lowerCamelCase )
lowercase__ = self._return_layers
lowercase__ = tuple(hidden_states[i] for i in self.out_indices )
else:
lowercase__ = self._backbone(lowerCamelCase, **lowerCamelCase )
lowercase__ = None
lowercase__ = tuple(lowerCamelCase )
lowercase__ = tuple(lowerCamelCase ) if hidden_states is not None else None
if not return_dict:
lowercase__ = (feature_maps,)
if output_hidden_states:
lowercase__ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase, hidden_states=lowerCamelCase, attentions=lowerCamelCase )
| 207 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[Any] = torch.device("""cpu""")
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
a : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Tuple = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int , snake_case : Dict ):
"""simple docstring"""
a : List[Any] = dct.pop(snake_case )
a : Dict = val
def SCREAMING_SNAKE_CASE__ ( snake_case : Any ):
"""simple docstring"""
a : Any = []
for k in state_dict.keys():
a : List[str] = k
if ".pwconv" in k:
a : Optional[int] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a : int = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a : Any = k_new.split('.' )
if ls[2].isdigit():
a : Any = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
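# Example of the mapping produced above for hypothetical checkpoint keys:
#   "network.0.0.dwconv.weight" -> "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight"
#   "patch_embed.0.weight"      -> "swiftformer.patch_embed.patch_embedding.0.weight"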
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Optional[int] ):
"""simple docstring"""
a : Optional[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a : Any = 1_000
a : Dict = 'huggingface/label-files'
a : Any = 'imagenet-1k-id2label.json'
a : str = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Dict = {int(snake_case ): v for k, v in idalabel.items()}
a : List[Any] = idalabel
a : List[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a : int = [3, 3, 6, 4]
a : List[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a : str = [3, 3, 9, 6]
a : int = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a : List[str] = [4, 3, 10, 5]
a : int = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a : Any = [4, 4, 12, 6]
a : Optional[int] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a : Any = torch.hub.load_state_dict_from_url(snake_case , map_location='cpu' , check_hash=snake_case )
else:
a : List[str] = torch.load(snake_case , map_location='cpu' )
a : List[Any] = checkpoint
a : List[str] = create_rename_keys(snake_case )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
# load HuggingFace model
a : Union[str, Any] = SwiftFormerForImageClassification(snake_case ).eval()
hf_model.load_state_dict(snake_case )
# prepare test inputs
a : List[Any] = prepare_img()
a : Dict = ViTImageProcessor.from_pretrained('preprocessor_config' )
a : str = processor(images=snake_case , return_tensors='pt' )
# compare outputs from both models
a : Any = get_expected_output(snake_case )
a : str = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , snake_case , atol=1E-3 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
UpperCamelCase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 358 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
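            # e.g. shortest_edge=224 -> int((256 / 224) * 224) == 256: the short side is
            # resized to 256 before the 224x224 center crop (a common ImageNet convention).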
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 345 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCamelCase__ = namedtuple('covid_data', 'cases deaths recovered')
def lowerCAmelCase_ ( __A = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
'''simple docstring'''
UpperCAmelCase__ = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(__A ).content ).xpath(__A ) )
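# The XPath above selects the text of every "maincounter-number" span in document
# order -- cases, deaths, recovered -- which is the order the namedtuple unpacks.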
UpperCamelCase__ = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 65 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class A ( unittest.TestCase ):
@cached_property
def lowercase_ (self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__UpperCAmelCase )
@slow
def lowercase_ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.resolver.convert_models(["heb-eng"] )
@slow
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__UpperCAmelCase )
assert mmeta["long_pair"] == "heb-eng"
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCAmelCase__ : list[list[int]] ) ->bool:
A__ : Optional[int] = len(UpperCAmelCase__ )
    # We need to create a solution object to save the path.
A__ : List[Any] = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )]
A__ : List[Any] = run_maze(UpperCAmelCase__, 0, 0, UpperCAmelCase__ )
if solved:
print("""\n""".join(str(UpperCAmelCase__ ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def _lowerCAmelCase ( UpperCAmelCase__ : list[list[int]], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : list[list[int]] ) ->bool:
A__ : List[Any] = len(UpperCAmelCase__ )
# Final check point.
if i == j == (size - 1):
A__ : Dict = 1
return True
A__ : List[str] = (not i < 0) and (not j < 0) # Check lower bounds
A__ : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
A__ : List[str] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
A__ : Any = 1
# check for directions
if (
run_maze(UpperCAmelCase__, i + 1, UpperCAmelCase__, UpperCAmelCase__ )
or run_maze(UpperCAmelCase__, UpperCAmelCase__, j + 1, UpperCAmelCase__ )
or run_maze(UpperCAmelCase__, i - 1, UpperCAmelCase__, UpperCAmelCase__ )
or run_maze(UpperCAmelCase__, UpperCAmelCase__, j - 1, UpperCAmelCase__ )
):
return True
A__ : Union[str, Any] = 0
return False
return False
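# Intended behaviour, assuming the deobfuscated signature run_maze(maze, i, j, solutions):
# for maze [[0, 0], [1, 0]] (0 = open, 1 = wall) the solver marks the path [[1, 1], [0, 1]]
# and returns True, while [[0, 1], [1, 0]] has no route and prints "No solution exists!".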
if __name__ == "__main__":
import doctest
doctest.testmod()
| 296 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 296 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 330 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
if not conversation_id:
            __lowerCamelCase = uuid.uuid4()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self , __UpperCAmelCase ):
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
__lowerCamelCase = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__lowerCamelCase = text
def lowerCamelCase ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
self.generated_responses.append(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
__lowerCamelCase = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
__lowerCamelCase = '''user''' if is_user else '''bot'''
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
lowerCAmelCase__ , r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ):
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise ValueError('''ConversationalPipeline expects a Conversation object as input''' )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length )
__lowerCamelCase = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:]
__lowerCamelCase = model_inputs.pop('''conversation''' )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ):
'''simple docstring'''
__lowerCamelCase = model_outputs['''output_ids''']
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
__lowerCamelCase = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(__UpperCAmelCase )
return conversation
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 330 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __a ( unittest.TestCase ):
def snake_case_ ( self ):
_lowerCamelCase = torch.nn.Linear(10 , 10 )
_lowerCamelCase = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCamelCase = Accelerator()
_lowerCamelCase = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 80 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> list[int]:
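    """Return every prime number up to ``num`` (inclusive) via the sieve of Eratosthenes.

    >>> SCREAMING_SNAKE_CASE_(10)
    [2, 3, 5, 7]
    >>> SCREAMING_SNAKE_CASE_(2)
    [2]
    """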
if num <= 0:
raise ValueError('Input must be a positive integer' )
_lowerCamelCase = [True] * (num + 1)
_lowerCamelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , snake_case ):
_lowerCamelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[int] =int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 80 | 1 |
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Any = [0] * len(__lowerCAmelCase )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
lowerCAmelCase : Union[str, Any] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__lowerCAmelCase ) ):
if indegree[i] == 0:
queue.append(__lowerCAmelCase )
while queue:
lowerCAmelCase : List[Any] = queue.pop(0 )
cnt += 1
topo.append(__lowerCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__lowerCAmelCase )
if cnt != len(__lowerCAmelCase ):
print('Cycle exists' )
else:
print(__lowerCAmelCase )
# Adjacency List of Graph
__A : Any = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
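# For the adjacency list above, the indegree/queue approach (Kahn's algorithm) is intended
# to visit the vertices in the order [0, 1, 2, 3, 4, 5]; if cnt never reaches the vertex
# count by the end of the loop, the graph contains a cycle.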
| 138 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _lowerCAmelCase ( pl.LightningModule ):
def __init__( self , _UpperCamelCase ) -> List[str]:
super().__init__()
lowerCAmelCase_ = model
lowerCAmelCase_ = 2
lowerCAmelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __a ( self ) -> Tuple:
pass
def lowerCamelCase__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = LongformerModel.from_pretrained(__lowerCAmelCase )
lowerCAmelCase_ = LightningModel(__lowerCAmelCase )
lowerCAmelCase_ = torch.load(__lowerCAmelCase , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
lowerCAmelCase_ = LongformerForQuestionAnswering.from_pretrained(__lowerCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__lowerCAmelCase )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_A = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 231 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase_ = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def __magic_name__ ( __a : Dict , __a : List[str] , __a : Union[str, Any]=8 ):
'''simple docstring'''
UpperCamelCase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCamelCase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
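# e.g. height = width = 768 with scale_factor=8: 768 // 8**2 == 12 with no remainder, so the
# function returns (96, 96) -- i.e. ceil(size / scale_factor**2) * scale_factor per dimension.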
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
super().__init__()
self.register_modules(
unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , movq=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if latents is None:
UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCamelCase__ = latents.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCamelCase__ = torch.device(F"cuda:{gpu_id}" )
UpperCamelCase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCamelCase__ = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=SCREAMING_SNAKE_CASE_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCamelCase__ , UpperCamelCase__ = cpu_offload_with_hook(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prev_module_hook=SCREAMING_SNAKE_CASE_ )
# We'll offload the last model manually.
UpperCamelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ (self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE_ )
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 4.0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
UpperCamelCase__ = self._execution_device
UpperCamelCase__ = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase__ = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
UpperCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase__ = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase__ = hint.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE_ )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.scheduler.timesteps
UpperCamelCase__ = self.movq.config.latent_channels
UpperCamelCase__ , UpperCamelCase__ = downscale_height_and_width(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.movq_scale_factor )
# create initial latent
UpperCamelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ = {"""image_embeds""": image_embeds, """hint""": hint}
UpperCamelCase__ = self.unet(
sample=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , added_cond_kwargs=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 )
UpperCamelCase__ , UpperCamelCase__ = variance_pred.chunk(2 )
UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , )[0]
# post-processing
UpperCamelCase__ = self.movq.decode(SCREAMING_SNAKE_CASE_ , force_not_quantize=SCREAMING_SNAKE_CASE_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCamelCase__ = image * 0.5 + 0.5
UpperCamelCase__ = image.clamp(0 , 1 )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 178 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
def __magic_name__ ( __a : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = R"""\w+[.]\d+"""
UpperCamelCase__ = re.findall(__a , __a )
for pat in pats:
UpperCamelCase__ = key.replace(__a , """_""".join(pat.split(""".""" ) ) )
return key
def __magic_name__ ( __a : str , __a : Dict , __a : int ):
'''simple docstring'''
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __magic_name__ ( __a : List[Any] , __a : List[Any] , __a : Optional[int]=42 ):
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCamelCase__ = flax_model.init_weights(PRNGKey(__a ) )
UpperCamelCase__ = flatten_dict(__a )
UpperCamelCase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = rename_key(__a )
UpperCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(__a )
return unflatten_dict(__a )
| 178 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = KandinskyInpaintPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self: Any ):
"""simple docstring"""
return 1_00
@property
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
A__ = MultilingualCLIP(UpperCamelCase )
A__ = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A__ = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_text_encoder
A__ = self.dummy_tokenizer
A__ = self.dummy_unet
A__ = self.dummy_movq
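# Configure a small DDIM scheduler (linear beta schedule, epsilon prediction) for the dummy pipeline.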
A__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCamelCase , )
A__ = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: int=0 ):
"""simple docstring"""
A__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
A__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase )
# create init_image
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
A__ = np.ones((64, 64) , dtype=np.floataa )
A__ = 0
if str(UpperCamelCase ).startswith("""mps""" ):
A__ = torch.manual_seed(UpperCamelCase )
else:
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
A__ = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = """cpu"""
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCamelCase )
A__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
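# Run the pipeline twice, once returning a dict and once a plain tuple, and check that both outputs agree.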
A__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
A__ = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A__ = np.ones((7_68, 7_68) , dtype=np.floataa )
A__ = 0
A__ = """a hat"""
A__ = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
A__ = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
A__ = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
A__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ , A__ = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
A__ = pipeline(
UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
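# The encoder raises HighwayException when an intermediate (highway) exit is confident enough (low entropy) to stop early.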
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
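# Record the entropy of the final logits; at inference time this drives the early-exit analysis.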
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# Collect logits, entropies and (when labels are given) losses for each highway exit
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway (the exit at the last layer)
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 335 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=8 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=9_9 , lowerCAmelCase__ : Any=1_6 , lowerCAmelCase__ : Union[str, Any]=5 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : List[str]=3_6 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : List[str]=0.0 , lowerCAmelCase__ : Union[str, Any]=5_1_2 , lowerCAmelCase__ : Optional[Any]=1_6 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : int=None , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : str = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : List[str] = is_training
_UpperCAmelCase : List[str] = use_input_mask
_UpperCAmelCase : Any = use_token_type_ids
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : int = hidden_size
_UpperCAmelCase : Union[str, Any] = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : int = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[str] = type_sequence_label_size
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : Any = num_labels
_UpperCAmelCase : List[Any] = num_choices
_UpperCAmelCase : Tuple = scope
def _lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[Any] = None
if self.use_input_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Any = None
if self.use_token_type_ids:
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : int = None
_UpperCAmelCase : str = None
if self.use_labels:
_UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.get_config()
_UpperCAmelCase : Optional[int] = 3_0_0
return config
def _lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase : int = MraModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Optional[Any] = MraModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Tuple = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
_UpperCAmelCase : Dict = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )
_UpperCAmelCase : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = MraForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = MraForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : Dict = MraForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.num_labels
_UpperCAmelCase : List[str] = MraForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = self.num_choices
_UpperCAmelCase : Optional[Any] = MraForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
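# Multiple-choice heads expect (batch, num_choices, seq_len) inputs, so tile every tensor across the choices.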
_UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : str = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Any = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : str = False
UpperCamelCase_ : str = False
UpperCamelCase_ : Union[str, Any] = ()
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : List[Any] = MraModelTester(self )
_UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 )
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
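# Re-run the model check once per supported position-embedding variant.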
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def _lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Union[str, Any] = MraModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skip(reason="MRA does not output attentions" )
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : str = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
_UpperCAmelCase : Optional[Any] = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase : Tuple = model(lowerCAmelCase__ )[0]
_UpperCAmelCase : Union[str, Any] = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCAmelCase : str = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
_UpperCAmelCase : Any = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase : str = model(lowerCAmelCase__ )[0]
_UpperCAmelCase : Union[str, Any] = 5_0_2_6_5
_UpperCAmelCase : Any = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCAmelCase : Dict = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
_UpperCAmelCase : Dict = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(lowerCAmelCase__ )[0]
_UpperCAmelCase : Dict = 5_0_2_6_5
_UpperCAmelCase : List[str] = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCAmelCase : int = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 17 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A__ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Optional[Any] ) -> str:
"""simple docstring"""
super().__init__()
_UpperCAmelCase : List[str] = model
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels )
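# forward is intentionally a no-op: the wrapper only mirrors the training-time layout so the checkpoint's state_dict loads cleanly.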
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
def __UpperCAmelCase ( a_: str, a_: str, a_: str ):
# load longformer model from model identifier
_UpperCAmelCase : int = LongformerModel.from_pretrained(a_ )
_UpperCAmelCase : Any = LightningModel(a_ )
_UpperCAmelCase : int = torch.load(a_, map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
_UpperCAmelCase : List[str] = LongformerForQuestionAnswering.from_pretrained(a_ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a_ )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 17 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> None:
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 32 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCAmelCase : int = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : int = """MobileNetV1Config"""
# Base docstring
lowerCAmelCase : List[Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Dict = [1, 1024, 7, 7]
# Image classification docstring
lowerCAmelCase : Union[str, Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Any = """tabby, tabby cat"""
lowerCAmelCase : List[Any] = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: List[str] = {}
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = model.mobilenet_va
else:
SCREAMING_SNAKE_CASE_: int = model
SCREAMING_SNAKE_CASE_: Dict = "MobilenetV1/Conv2d_0/"
SCREAMING_SNAKE_CASE_: str = backbone.conv_stem.convolution.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.bias
SCREAMING_SNAKE_CASE_: int = backbone.conv_stem.normalization.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[int] = backbone.conv_stem.normalization.running_var
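# Each of MobileNetV1's 13 blocks is a depthwise conv followed by a pointwise conv, i.e. two PyTorch layers per TF index.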
for i in range(13 ):
SCREAMING_SNAKE_CASE_: List[str] = i + 1
SCREAMING_SNAKE_CASE_: Optional[int] = i * 2
SCREAMING_SNAKE_CASE_: Any = backbone.layer[pt_index]
SCREAMING_SNAKE_CASE_: Any = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
SCREAMING_SNAKE_CASE_: Any = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: str = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_var
SCREAMING_SNAKE_CASE_: Tuple = backbone.layer[pt_index + 1]
SCREAMING_SNAKE_CASE_: List[str] = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
SCREAMING_SNAKE_CASE_: int = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: Optional[int] = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_var
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = "MobilenetV1/Logits/Conv2d_1c_1x1/"
SCREAMING_SNAKE_CASE_: Optional[Any] = model.classifier.weight
SCREAMING_SNAKE_CASE_: Tuple = model.classifier.bias
return tf_to_pt_map
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
SCREAMING_SNAKE_CASE_: int = tf.train.list_variables(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
SCREAMING_SNAKE_CASE_: Any = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = array
# Build TF to PyTorch weights loading map
SCREAMING_SNAKE_CASE_: Optional[Any] = _build_tf_to_pytorch_map(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
SCREAMING_SNAKE_CASE_: int = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
SCREAMING_SNAKE_CASE_: int = np.transpose(_UpperCAmelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
SCREAMING_SNAKE_CASE_: List[str] = array.squeeze().transpose()
else:
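# Regular TF conv kernels are (H, W, in, out); PyTorch expects (out, in, H, W).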
SCREAMING_SNAKE_CASE_: Any = np.transpose(_UpperCAmelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
SCREAMING_SNAKE_CASE_: int = torch.from_numpy(_UpperCAmelCase )
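# Remove the variable plus its RMSProp and ExponentialMovingAverage optimizer slots so they are not reported as unused.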
tf_weights.pop(_UpperCAmelCase , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp" , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp_1" , _UpperCAmelCase )
tf_weights.pop(name + "/ExponentialMovingAverage" , _UpperCAmelCase )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
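# Reproduce TensorFlow's "SAME" padding, which pads asymmetrically (the extra pixel goes to the bottom/right).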
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = features.shape[-2:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = conv_layer.stride
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = conv_layer.kernel_size
if in_height % stride_height == 0:
SCREAMING_SNAKE_CASE_: int = max(kernel_height - stride_height , 0 )
else:
SCREAMING_SNAKE_CASE_: Tuple = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
SCREAMING_SNAKE_CASE_: str = max(kernel_width - stride_width , 0 )
else:
SCREAMING_SNAKE_CASE_: Dict = max(kernel_width - (in_width % stride_width) , 0 )
SCREAMING_SNAKE_CASE_: str = pad_along_width // 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = pad_along_width - pad_left
SCREAMING_SNAKE_CASE_: int = pad_along_height // 2
SCREAMING_SNAKE_CASE_: Tuple = pad_along_height - pad_top
SCREAMING_SNAKE_CASE_: Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_UpperCAmelCase , _UpperCAmelCase , "constant" , 0.0 )
class __lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool or str] = True , ):
super().__init__()
SCREAMING_SNAKE_CASE_: Optional[int] = config
if in_channels % groups != 0:
raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")
SCREAMING_SNAKE_CASE_: int = 0 if config.tf_padding else int((kernel_size - 1) / 2)
SCREAMING_SNAKE_CASE_: Union[str, Any] = nn.Convad(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="zeros" , )
if use_normalization:
SCREAMING_SNAKE_CASE_: str = nn.BatchNormad(
num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , )
else:
SCREAMING_SNAKE_CASE_: str = None
if use_activation:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE_: Any = config.hidden_act
else:
SCREAMING_SNAKE_CASE_: int = None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : torch.Tensor):
if self.config.tf_padding:
SCREAMING_SNAKE_CASE_: Union[str, Any] = apply_tf_padding(lowerCAmelCase__ , self.convolution)
SCREAMING_SNAKE_CASE_: Optional[int] = self.convolution(lowerCAmelCase__)
if self.normalization is not None:
SCREAMING_SNAKE_CASE_: int = self.normalization(lowerCAmelCase__)
if self.activation is not None:
SCREAMING_SNAKE_CASE_: List[Any] = self.activation(lowerCAmelCase__)
return features
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[str] = MobileNetVaConfig
_UpperCAmelCase : List[Any] = load_tf_weights_in_mobilenet_va
_UpperCAmelCase : List[Any] = '''mobilenet_v1'''
_UpperCAmelCase : int = '''pixel_values'''
_UpperCAmelCase : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Union[nn.Linear, nn.Convad]):
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.BatchNormad):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
lowerCAmelCase : Any = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : List[str] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : bool = True):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = config
SCREAMING_SNAKE_CASE_: Union[str, Any] = 32
SCREAMING_SNAKE_CASE_: Dict = max(int(depth * config.depth_multiplier) , config.min_depth)
SCREAMING_SNAKE_CASE_: Tuple = MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , )
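# Stride of each of the 13 depthwise blocks; the channel depth doubles on stride-2 blocks (and on the first block).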
SCREAMING_SNAKE_CASE_: Optional[int] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
SCREAMING_SNAKE_CASE_: str = nn.ModuleList()
for i in range(13):
SCREAMING_SNAKE_CASE_: List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
SCREAMING_SNAKE_CASE_: str = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ))
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ))
SCREAMING_SNAKE_CASE_: List[str] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : str):
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_: Any = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
SCREAMING_SNAKE_CASE_: Optional[Any] = self.conv_stem(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
SCREAMING_SNAKE_CASE_: Tuple = layer_module(lowerCAmelCase__)
if output_hidden_states:
SCREAMING_SNAKE_CASE_: Optional[int] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_states
if self.pooler is not None:
SCREAMING_SNAKE_CASE_: int = torch.flatten(self.pooler(lowerCAmelCase__) , start_dim=1)
else:
SCREAMING_SNAKE_CASE_: List[str] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : MobileNetVaConfig):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = config.num_labels
SCREAMING_SNAKE_CASE_: Dict = MobileNetVaModel(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
SCREAMING_SNAKE_CASE_: str = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = nn.Linear(lowerCAmelCase__ , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: List[str] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_: List[str] = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_: Tuple = self.classifier(self.dropout(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[int] = None
if labels is not None:
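# Infer the problem type from num_labels and the label dtype when the config does not specify one.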
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: List[Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_: int = "single_label_classification"
else:
SCREAMING_SNAKE_CASE_: str = "multi_label_classification"
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_: Dict = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: Any = loss_fct(logits.squeeze() , labels.squeeze())
else:
SCREAMING_SNAKE_CASE_: int = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_: Any = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_: Dict = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
if not return_dict:
SCREAMING_SNAKE_CASE_: int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
| 13 | 0 |
'''simple docstring'''
import numpy as np
import datasets
UpperCamelCase = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution, not between two distinct points.
It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
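For a point x it is computed here as (x - mu)^T S^{-1} (x - mu), where mu and S are the mean and covariance of
the reference distribution (note that the square root is not taken).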
'''
UpperCamelCase = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
UpperCamelCase = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The (squared) Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
'''simple docstring'''
A: int = np.array(SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = np.array(SCREAMING_SNAKE_CASE_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
A: Dict = X - np.mean(SCREAMING_SNAKE_CASE_ )
A: Optional[int] = np.cov(reference_distribution.T )
try:
A: Any = np.linalg.inv(SCREAMING_SNAKE_CASE_ )
except np.linalg.LinAlgError:
A: List[Any] = np.linalg.pinv(SCREAMING_SNAKE_CASE_ )
A: Dict = np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Optional[int] = np.dot(SCREAMING_SNAKE_CASE_ , X_minus_mu.T ).diagonal()
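# The diagonal holds (x - mu)^T S^{-1} (x - mu) for each row x, i.e. the squared Mahalanobis distance.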
return {"mahalanobis": mahal_dist}
| 334 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger('''transformers.models.encodec''')
UpperCamelCase = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
UpperCamelCase = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
UpperCamelCase = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
UpperCamelCase = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
UpperCamelCase = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
UpperCamelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
UpperCamelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
UpperCamelCase = []
UpperCamelCase = []
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict:
for attribute in key.split('''.''' ):
A: Union[str, Any] = getattr(__lowercase , __lowercase )
if weight_type is not None:
A: Tuple = getattr(__lowercase , __lowercase ).shape
else:
A: str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A: Dict = value
elif weight_type == "weight_g":
A: Tuple = value
elif weight_type == "weight_v":
A: Any = value
elif weight_type == "bias":
A: str = value
elif weight_type == "running_mean":
A: List[Any] = value
elif weight_type == "running_var":
A: Dict = value
elif weight_type == "num_batches_tracked":
A: List[str] = value
elif weight_type == "weight_ih_l0":
A: Dict = value
elif weight_type == "weight_hh_l0":
A: Optional[int] = value
elif weight_type == "bias_ih_l0":
A: List[Any] = value
elif weight_type == "bias_hh_l0":
A: str = value
elif weight_type == "weight_ih_l1":
A: Optional[int] = value
elif weight_type == "weight_hh_l1":
A: int = value
elif weight_type == "bias_ih_l1":
A: Optional[Any] = value
elif weight_type == "bias_hh_l1":
A: str = value
else:
A: Optional[int] = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
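# Ignore patterns may end in ".*" (prefix match) or contain ".*." (both prefix and suffix must match).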
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A , A: Any = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple:
A: Any = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
A: List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A: List[Any] = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__lowercase , __lowercase ):
logger.info(F"""{name} was ignored""" )
continue
A: Optional[int] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A , A: Optional[int] = key.split('''.*.''' )
if prefix in name and suffix in name:
A: str = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
A: Optional[Any] = True
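# Resolve the "*" in the mapped key to the concrete layer index parsed from the checkpoint name.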
if "*" in mapped_key:
A: Any = name.split(__lowercase )[0].split('''.''' )[-2]
A: Tuple = mapped_key.replace('''*''' , __lowercase )
if "weight_g" in name:
A: str = '''weight_g'''
elif "weight_v" in name:
A: List[Any] = '''weight_v'''
elif "weight_ih_l0" in name:
A: Dict = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
A: int = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
A: Union[str, Any] = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
A: Tuple = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
A: int = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
A: Optional[Any] = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
A: Dict = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
A: str = '''bias_hh_l1'''
elif "bias" in name:
A: Union[str, Any] = '''bias'''
elif "weight" in name:
A: Dict = '''weight'''
elif "running_mean" in name:
A: Tuple = '''running_mean'''
elif "running_var" in name:
A: Any = '''running_var'''
elif "num_batches_tracked" in name:
A: str = '''num_batches_tracked'''
else:
A: Tuple = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ) -> Dict:
if config_path is not None:
A: Tuple = EncodecConfig.from_pretrained(__lowercase )
else:
A: Union[str, Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A: Union[str, Any] = [8, 5, 4, 4]
A: Dict = [2.2]
        A: List[Any] = 64
        A: Optional[Any] = 32_000
        A: List[Any] = 2048
A: Optional[Any] = False
A: int = False
A: Union[str, Any] = False
elif model_name == "encodec_48khz":
A: Optional[int] = [8, 5, 4, 2]
        A: List[Any] = [3.0, 6.0, 12.0, 24.0]
        A: List[Any] = 48_000
A: int = 2
A: List[Any] = False
A: Any = '''time_group_norm'''
A: Optional[Any] = True
A: Any = 1.0
        A: Any = 0.01
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
A: str = EncodecModel(__lowercase )
A: Optional[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__lowercase )
A: Union[str, Any] = torch.load(__lowercase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A: Optional[int] = original_checkpoint['''best_state''']
recursively_load_weights(__lowercase , __lowercase , __lowercase )
model.save_pretrained(__lowercase )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__lowercase )
model.push_to_hub(__lowercase )
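# Example invocation (script name and paths are illustrative):
#   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th --pytorch_dump_folder_path ./encodec-24khz-hf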
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 334 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[Any]=99 , UpperCamelCase__ : Dict=36 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Dict=6 , UpperCamelCase__ : Optional[Any]=6 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Any=None , UpperCamelCase__ : Tuple=1000 , ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = text_seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = coordinate_size
__magic_name__ = shape_size
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__magic_name__ = text_seq_length
__magic_name__ = (image_size // patch_size) ** 2 + 1
__magic_name__ = self.text_seq_length + self.image_seq_length
def _lowercase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__magic_name__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__magic_name__ = bbox[i, j, 3]
__magic_name__ = bbox[i, j, 1]
__magic_name__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__magic_name__ = bbox[i, j, 2]
__magic_name__ = bbox[i, j, 0]
__magic_name__ = t
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.text_seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__magic_name__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Dict ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = LayoutLMvaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# text + image
__magic_name__ = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ )
__magic_name__ = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__magic_name__ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__magic_name__ = model(pixel_values=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.num_labels
__magic_name__ = LayoutLMvaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.num_labels
__magic_name__ = LayoutLMvaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowercase ( self : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : int ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
        (
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
        ) = config_and_inputs
__magic_name__ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = False
a__ = False
a__ = False
a__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _lowercase ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
return True
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = LayoutLMvaModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
__magic_name__ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCamelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
__magic_name__ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in get_values(UpperCamelCase__ ):
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
return inputs_dict
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def _lowercase ( self : str ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
@slow
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = LayoutLMvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def a__ ( ):
'''simple docstring'''
__magic_name__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None
@slow
def _lowercase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCamelCase__ )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCamelCase__ )
__magic_name__ = torch.tensor([[1, 2]] )
__magic_name__ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__magic_name__ = model(
input_ids=input_ids.to(UpperCamelCase__ ) , bbox=bbox.to(UpperCamelCase__ ) , pixel_values=pixel_values.to(UpperCamelCase__ ) , )
# verify the logits
__magic_name__ = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
__magic_name__ = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 88 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_A = logging.getLogger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 )
return np.sum(outputs == labels )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
with open(SCREAMING_SNAKE_CASE__ , encoding='utf_8' ) as f:
__UpperCamelCase =csv.reader(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
next(SCREAMING_SNAKE_CASE__ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE__ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
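# Dataset packer: each (story, cont1, cont2) triple becomes two candidate sequences,
# [start] story [delim] continuation [clf]; mc_token_ids stores the [clf] index for
# the multiple-choice head, and LM labels stay masked at -100 outside the sequence.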
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =[]
for dataset in encoded_datasets:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
        __UpperCamelCase =np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        __UpperCamelCase =np.zeros((n_batch, 2) , dtype=np.int64 )
        __UpperCamelCase =np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        __UpperCamelCase =np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, conta, contb, mc_label) in enumerate(SCREAMING_SNAKE_CASE__ ):
            __UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
__UpperCamelCase =with_conta
__UpperCamelCase =with_conta
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1
__UpperCamelCase =with_conta
__UpperCamelCase =with_conta
__UpperCamelCase =mc_label
__UpperCamelCase =(input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE__ ) for t in all_inputs ) )
return tensor_datasets
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 )
    parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
__UpperCamelCase =parser.parse_args()
print(SCREAMING_SNAKE_CASE__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__UpperCamelCase =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__UpperCamelCase =['_start_', '_delimiter_', '_classify_']
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
model.to(SCREAMING_SNAKE_CASE__ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : str ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('Encoding dataset...' )
__UpperCamelCase =load_rocstories_dataset(args.train_dataset )
__UpperCamelCase =load_rocstories_dataset(args.eval_dataset )
__UpperCamelCase =(train_dataset, eval_dataset)
__UpperCamelCase =tokenize_and_encode(SCREAMING_SNAKE_CASE__ )
# Compute the max input length for the Transformer
__UpperCamelCase =model.config.n_positions // 2 - 2
    __UpperCamelCase =max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
__UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__UpperCamelCase =pre_process_datasets(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =tensor_datasets[0], tensor_datasets[1]
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.train_batch_size )
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =SequentialSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__UpperCamelCase =args.max_steps
__UpperCamelCase =args.max_steps // (len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps) + 1
else:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps * args.num_train_epochs
__UpperCamelCase =list(model.named_parameters() )
__UpperCamelCase =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__UpperCamelCase =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__UpperCamelCase =AdamW(SCREAMING_SNAKE_CASE__ , lr=args.learning_rate , eps=args.adam_epsilon )
__UpperCamelCase =get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE__ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE__ )
if args.do_train:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__UpperCamelCase =0
__UpperCamelCase =0
__UpperCamelCase =tqdm(SCREAMING_SNAKE_CASE__ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__UpperCamelCase =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__UpperCamelCase ='Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__UpperCamelCase =model.module if hasattr(SCREAMING_SNAKE_CASE__ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE__ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE__ )
if args.do_eval:
model.eval()
__UpperCamelCase , __UpperCamelCase =0, 0
__UpperCamelCase , __UpperCamelCase =0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , desc='Evaluating' ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
with torch.no_grad():
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =model(
SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =mc_logits.detach().cpu().numpy()
__UpperCamelCase =mc_labels.to('cpu' ).numpy()
__UpperCamelCase =accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__UpperCamelCase =eval_loss / nb_eval_steps
__UpperCamelCase =eval_accuracy / nb_eval_examples
__UpperCamelCase =tr_loss / nb_tr_steps if args.do_train else None
__UpperCamelCase ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__UpperCamelCase =os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
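# Example invocation (script name and file paths are illustrative):
#   python run_openai_gpt.py --do_train --do_eval --train_dataset train.csv \
#       --eval_dataset val.csv --output_dir ./rocstories_out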
if __name__ == "__main__":
main()
| 62 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def primitive_root(p_val: int) -> int:
    """simple docstring"""
    print("Generating primitive root of p" )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
return g
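# ElGamal key generation: for prime p with primitive root e_1, pick a private
# exponent d and publish (p, e_1, e_1^-d mod p); the private key is just d.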
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """simple docstring"""
    print("Generating prime p..." )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    """simple docstring"""
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print("\nWARNING:" )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , "w" ) as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , "w" ) as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""" )
def main() -> None:
    """simple docstring"""
    print("Making key files..." )
    make_key_files("elgamal" , 2_048 )
    print("Key files generation successful" )
if __name__ == "__main__":
main()
| 121 |
from collections import Counter
from timeit import timeit
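# A string can be rearranged into a palindrome iff at most one character occurs an
# odd number of times; the Counter one-liner below checks exactly that condition.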
def __lowerCamelCase ( lowerCamelCase__ = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def __lowerCamelCase ( lowerCamelCase__ = "" ):
"""simple docstring"""
if len(lowerCamelCase__ ) == 0:
return True
lowercase__ : str = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowercase__ : dict[str, int] = {}
for character in lower_case_input_str:
lowercase__ : List[Any] = character_freq_dict.get(lowerCamelCase__ , 0 ) + 1
lowercase__ : List[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCamelCase ( lowerCamelCase__ = "" ):
"""simple docstring"""
print("\nFor string = " , lowerCamelCase__ , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(lowerCamelCase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    check_str = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 121 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
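# Resonant frequency of an ideal LC circuit: f = 1 / (2 * pi * sqrt(L * C)).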
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86 |
"""simple docstring"""
from math import factorial, radians
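# Maclaurin approximation of sine: sin(x) = x - x^3/3! + x^5/5! - ..., with the
# angle first reduced modulo 360 degrees and converted to radians.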
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """simple docstring"""
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b # One positive term and the next will be negative and so on...
        a += 2 # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 102 | 0 |
from typing import Any
def mode(input_list: list[Any]) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
from __future__ import annotations
lowerCAmelCase__ = tuple[int, int, int]
lowerCAmelCase__ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowerCAmelCase__ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
lowerCAmelCase__ = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
lowerCAmelCase__ = 'FOBHMDKEXQNRAULPGSJVTYICZW'
lowerCAmelCase__ = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
lowerCAmelCase__ = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
lowerCAmelCase__ = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
lowerCAmelCase__ = 'SGLCPQWZHKXAREONTFBVIYJUDM'
lowerCAmelCase__ = 'HVSICLTYKQUBXDWAJZOMFGPREN'
lowerCAmelCase__ = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
lowerCAmelCase__ = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
lowerCAmelCase__ = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(lowerCAmelCase__ ) )) < 3:
lowerCAmelCase__ = F"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(lowerCAmelCase__ )
# Checks if rotor positions are valid
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = rotpos
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
lowerCAmelCase__ = F"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
lowerCAmelCase__ = F"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
lowerCAmelCase__ = F"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(lowerCAmelCase__ )
# Validates string and returns dict
lowerCAmelCase__ = _plugboard(lowerCAmelCase__ )
return rotpos, rotsel, pbdict
def __lowerCamelCase ( lowerCAmelCase__ ):
    # checks that the input string
    # a) is of type string
    # b) has even length (so pairs can be made)
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = F"""Plugboard setting isn't type string ({type(lowerCAmelCase__ )})"""
raise TypeError(lowerCAmelCase__ )
elif len(lowerCAmelCase__ ) % 2 != 0:
lowerCAmelCase__ = F"""Odd number of symbols ({len(lowerCAmelCase__ )})"""
raise Exception(lowerCAmelCase__ )
elif pbstring == "":
return {}
    pbstring = pbstring.replace(' ' , '' )  # strip spaces so only symbols are validated
# Checks if all characters are unique
lowerCAmelCase__ = set()
for i in pbstring:
if i not in abc:
lowerCAmelCase__ = F"""'{i}' not in list of symbols"""
raise Exception(lowerCAmelCase__ )
elif i in tmppbl:
lowerCAmelCase__ = F"""Duplicate symbol ({i})"""
raise Exception(lowerCAmelCase__ )
else:
tmppbl.add(lowerCAmelCase__ )
del tmppbl
# Created the dictionary
lowerCAmelCase__ = {}
for j in range(0 , len(lowerCAmelCase__ ) - 1 , 2 ):
lowerCAmelCase__ = pbstring[j + 1]
lowerCAmelCase__ = pbstring[j]
return pb
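# Cipher path: plugboard -> rotor 1 -> rotor 2 -> rotor 3 -> reflector -> rotors in
# reverse -> plugboard, with rotor positions stepping odometer-style, so running the
# machine twice with identical settings decrypts the message.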
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (rotora, rotora, rotora) , lowerCAmelCase__ = "" , ):
lowerCAmelCase__ = text.upper()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _validator(
lowerCAmelCase__ , lowerCAmelCase__ , plugb.upper() )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = rotor_position
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCAmelCase__ = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCAmelCase__ = plugboard[symbol]
# rotor ra --------------------------
lowerCAmelCase__ = abc.index(lowerCAmelCase__ ) + rotorposa
lowerCAmelCase__ = rotora[index % len(lowerCAmelCase__ )]
# rotor rb --------------------------
lowerCAmelCase__ = abc.index(lowerCAmelCase__ ) + rotorposa
lowerCAmelCase__ = rotora[index % len(lowerCAmelCase__ )]
# rotor rc --------------------------
lowerCAmelCase__ = abc.index(lowerCAmelCase__ ) + rotorposa
lowerCAmelCase__ = rotora[index % len(lowerCAmelCase__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCAmelCase__ = reflector[symbol]
# 2nd rotors
lowerCAmelCase__ = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
lowerCAmelCase__ = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
lowerCAmelCase__ = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCAmelCase__ = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowerCAmelCase__ = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowerCAmelCase__ = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowerCAmelCase__ = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = 'This is my Python script that emulates the Enigma machine from WWII.'
lowerCAmelCase__ = (1, 1, 1)
lowerCAmelCase__ = 'pictures'
lowerCAmelCase__ = (rotora, rotora, rotora)
lowerCAmelCase__ = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 119 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = '''ZinengTang/tvlt-base'''
_a = tempfile.mkdtemp()
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Optional[Any]:
return TvltImageProcessor.from_pretrained(self.checkpoint , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> int:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.get_image_processor()
_a = self.get_feature_extractor()
_a = TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_a = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , __UpperCAmelCase )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
_a = self.get_image_processor()
_a = self.get_feature_extractor()
_a = TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
_a = np.ones([12000] )
_a = feature_extractor(__UpperCAmelCase , return_tensors='''np''' )
_a = processor(audio=__UpperCAmelCase , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> int:
_a = self.get_image_processor()
_a = self.get_feature_extractor()
_a = TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
_a = np.ones([3, 224, 224] )
_a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
_a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> Dict:
_a = self.get_image_processor()
_a = self.get_feature_extractor()
_a = TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
_a = np.ones([12000] )
_a = np.ones([3, 224, 224] )
_a = processor(audio=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def _UpperCAmelCase ( self ) -> int:
_a = self.get_image_processor()
_a = self.get_feature_extractor()
_a = TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 320 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__snake_case = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__snake_case = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def A_ ( ):
"""simple docstring"""
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def A_ ( ):
"""simple docstring"""
_a = '''rougeLsum'''
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
assert score > score_no_sep
def A_ ( ):
"""simple docstring"""
_a = ['''rouge1''', '''rouge2''', '''rougeL''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def A_ ( ):
"""simple docstring"""
_a = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_a = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_a = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_a = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def A_ ( ):
"""simple docstring"""
_a = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge_path(
data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
| 320 | 1 |
from maths.prime_factors import prime_factors
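# Liouville lambda function: lambda(n) = (-1)**Omega(n), where Omega(n) counts the
# prime factors of n with multiplicity.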
def liouville_lambda(number: int) -> int:
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
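# TF2 checkpoints name layers 'layer_with_weights-N'; the loader below maps N onto
# HF BERT submodules: 0-2 the word/position/token-type embeddings, 3 the embedding
# LayerNorm, 4..num_hidden_layers+3 the encoder layers, num_hidden_layers+4 the pooler.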
def snake_case_ ( snake_case , snake_case , snake_case ) -> Any:
lowercase__: Dict = os.path.abspath(snake_case )
logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
lowercase__: Optional[Any] = tf.train.list_variables(snake_case )
lowercase__: List[Any] = []
lowercase__: Tuple = []
lowercase__: int = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase__: Union[str, Any] = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(f'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase__: str = name[1:]
# figure out how many levels deep the name is
lowercase__: Optional[Any] = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(snake_case )
# read data
lowercase__: Optional[Any] = tf.train.load_variable(snake_case , snake_case )
names.append('/'.join(snake_case ) )
arrays.append(snake_case )
logger.info(f'Read a total of {len(snake_case ):,} layers' )
# Sanity check
if len(set(snake_case ) ) != 1:
raise ValueError(f'Found layer names with different depths (layer depth {list(set(snake_case ) )})' )
lowercase__: Any = list(set(snake_case ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
for full_name, array in zip(snake_case , snake_case ):
lowercase__: Optional[int] = full_name.split('/' )
lowercase__: List[Any] = model
lowercase__: Any = []
for i, m_name in enumerate(snake_case ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
lowercase__: Any = int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
lowercase__: str = getattr(snake_case , 'embeddings' )
lowercase__: int = getattr(snake_case , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
lowercase__: int = getattr(snake_case , 'encoder' )
lowercase__: List[str] = getattr(snake_case , 'layer' )
lowercase__: Union[str, Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
lowercase__: Tuple = getattr(snake_case , 'pooler' )
lowercase__: Tuple = getattr(snake_case , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
lowercase__: Union[str, Any] = getattr(snake_case , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
lowercase__: Union[str, Any] = getattr(snake_case , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
lowercase__: Dict = getattr(snake_case , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
lowercase__: Optional[Any] = getattr(snake_case , 'token_type_embeddings' )
else:
raise ValueError(f'Unknown embedding layer with name {full_name}' )
trace.append('weight' )
lowercase__: List[str] = getattr(snake_case , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
lowercase__: int = getattr(snake_case , 'attention' )
lowercase__: Union[str, Any] = getattr(snake_case , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
lowercase__: str = getattr(snake_case , 'attention' )
lowercase__: Optional[Any] = getattr(snake_case , 'output' )
lowercase__: List[Any] = getattr(snake_case , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
lowercase__: Optional[Any] = getattr(snake_case , 'attention' )
lowercase__: Optional[int] = getattr(snake_case , 'output' )
lowercase__: Optional[Any] = getattr(snake_case , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
lowercase__: Union[str, Any] = getattr(snake_case , 'output' )
lowercase__: List[Any] = getattr(snake_case , 'dense' )
elif m_name == "_output_layer_norm":
                # output LayerNorm
trace.extend(['output', 'LayerNorm'] )
lowercase__: Any = getattr(snake_case , 'output' )
lowercase__: str = getattr(snake_case , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
lowercase__: Tuple = getattr(snake_case , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
lowercase__: List[str] = getattr(snake_case , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
lowercase__: Optional[int] = getattr(snake_case , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
lowercase__: Any = getattr(snake_case , 'intermediate' )
lowercase__: str = getattr(snake_case , 'dense' )
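            # NOTE: this second "_output_layer_norm" branch is unreachable -- the
            # identical condition is already matched by the branch above.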
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('output' )
lowercase__: Union[str, Any] = getattr(snake_case , 'output' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
lowercase__: str = getattr(snake_case , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
lowercase__: Tuple = getattr(snake_case , 'weight' )
else:
logger.warning(f'Ignored {m_name}' )
# for certain layers reshape is necessary
lowercase__: Any = '.'.join(snake_case )
if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , snake_case ) or re.match(
R'(\S+)\.attention\.output\.dense\.weight' , snake_case ):
lowercase__: Union[str, Any] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase__: str = array.transpose()
if pointer.shape == array.shape:
lowercase__: Union[str, Any] = torch.from_numpy(snake_case )
else:
raise ValueError(
f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
f' {array.shape}' )
logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model


def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
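
# Hedged usage sketch (added; the script filename and checkpoint paths below are
# assumptions, not from the original file):
#
#   python convert_bert_tf2_checkpoint.py \
#       --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#       --bert_config_file ./tf2_model/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin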
| 288 | 1 |
def solution(n: int = 1000) -> int:
    """Count how many of the first ``n`` expansions of sqrt(2)'s continued
    fraction have a numerator with more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
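
# Worked example (added): the expansions produced by the loop are 3/2, 7/5, 17/12,
# 41/29, 99/70, 239/169, 577/408, 1393/985, ...  The eighth expansion, 1393/985, is
# the first whose numerator (4 digits) is longer than its denominator (3 digits),
# so solution(8) returns 1.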
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
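
# Hedged usage sketch (added; "microsoft/layoutxlm-base" is a public checkpoint,
# and the image path is an assumption):
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   # encoding holds input_ids, bbox, attention_mask and image, per model_input_names above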
| 67 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCAmelCase_ ( __lowerCAmelCase = "isbn/0140328726" )-> dict:
'''simple docstring'''
UpperCAmelCase : Tuple =olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
UpperCAmelCase : Any =f'''{olid} is not a valid Open Library olid'''
raise ValueError(__lowerCAmelCase )
return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def lowerCAmelCase_ ( __lowerCAmelCase )-> dict:
'''simple docstring'''
UpperCAmelCase : int ={
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
UpperCAmelCase : Tuple ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCAmelCase : Optional[int] =[
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
UpperCAmelCase : List[str] =data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCAmelCase : str =''', '''.join(__lowerCAmelCase )
return data
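
# Illustrative shape of the result (added; actual values come from the live
# Open Library API): summarize_book(get_openlibrary_data("isbn/0140328726"))
# returns a dict keyed by "Title", "Publish date", "Authors", "Number of pages:",
# "First sentence", "ISBN (10)" and "ISBN (13)", with list values already joined
# into comma-separated strings by the loop above.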
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
| 78 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]


def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sentence_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
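
# Added note (a hedged summary of the utils under test, not part of the test file):
# calculate_rouge returns an aggregated {rouge_key: score} dict by default and a
# defaultdict of per-example scores when bootstrap_aggregation=False, which is what
# the isinstance assertions above rely on.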
| 78 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval DataLoaders for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
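
# Hedged launch sketch (added; the script name is an assumption): saved as
# nlp_example.py, the script could be launched with Accelerate on the current
# hardware configuration, or run as a plain single process:
#
#   accelerate launch nlp_example.py --mixed_precision fp16
#   python nlp_example.py --cpu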
| 205 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance (step / max_step) after which the complex
    number given by this x-y pair diverges. Members of the Mandelbrot set do
    not diverge, so their distance is 1."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
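
# Worked example (added): for (x, y) = (0, 0) the iteration never leaves the
# origin, so the loop runs to completion and get_distance(0, 0, 50) returns
# 49 / 49 = 1.0; the origin belongs to the Mandelbrot set. For (x, y) = (2, 0)
# the very first update gives a = 6, so a*a + b*b = 36 > 4 triggers the break
# at step 0 and the function returns 0.0.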


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white color-coding: the Mandelbrot set is black, everything else white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account; the set itself is black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Generate an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280 | 0 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE__ = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
SCREAMING_SNAKE_CASE__ = """FOBHMDKEXQNRAULPGSJVTYICZW"""
SCREAMING_SNAKE_CASE__ = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE__ = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
SCREAMING_SNAKE_CASE__ = """SGLCPQWZHKXAREONTFBVIYJUDM"""
SCREAMING_SNAKE_CASE__ = """HVSICLTYKQUBXDWAJZOMFGPREN"""
SCREAMING_SNAKE_CASE__ = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
SCREAMING_SNAKE_CASE__ = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
SCREAMING_SNAKE_CASE__ = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check that the values can be used with the enigma function."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Build the symmetric plugboard mapping from a string of letter pairs."""
    # the input must be a string of even length so pairs can be made
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
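
# Worked example (added): _plugboard("PICTURES") pairs adjacent letters and returns
# {"P": "I", "I": "P", "C": "T", "T": "C", "U": "R", "R": "U", "E": "S", "S": "E"},
# so e.g. "P" and "I" are swapped both before and after the rotor pass in enigma() below.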


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher/decipher text. All characters are upper-cased; non-letter symbols
    pass through unchanged. Running the output through the same settings
    recovers the input."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """This is my Python script that emulates the Enigma machine from WWII."""
SCREAMING_SNAKE_CASE__ = (1, 1, 1)
SCREAMING_SNAKE_CASE__ = """pictures"""
SCREAMING_SNAKE_CASE__ = (rotora, rotora, rotora)
SCREAMING_SNAKE_CASE__ = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 363 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change guarantees a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
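
# Worked example (added): equation(x) = 10 - x*x has roots at +/-sqrt(10) = 3.1623...
# Since equation(-2) = 6 and equation(5) = -15 have opposite signs, bisection(-2, 5)
# keeps halving the bracket until it is narrower than 0.01 and returns a value close
# to 3.16; bisection(0, 6) converges to the same root from a different bracket.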
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 297 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 264 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" PEGASUS tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos; no bos is added."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
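
# Hedged usage sketch (added; "google/pegasus-xsum" is the checkpoint referenced
# in the maps above):
#
#   tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   batch = tokenizer(["PEGASUS was pretrained with gap-sentence generation."], return_tensors="pt")
#   # every encoded sequence ends with the eos token appended by
#   # build_inputs_with_special_tokens above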
| 264 | 1 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 188 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 188 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create the token-type mask used for sequence-pair classification tasks."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
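
# Hedged usage sketch (added; the checkpoint name is taken from the pretrained map above):
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tokenizer("first segment", "second segment")
#   # enc["token_type_ids"] marks the first segment (with [CLS]/[SEP]) with 0s and
#   # the second segment with 1s, as create_token_type_ids_from_sequences describes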
| 121 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rag'] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rag'] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 352 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        # binarize the logits, then scale to a 0/255 grayscale mask
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
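# Illustrative usage (a sketch; `PipelineTool.__call__` chains encode -> forward -> decode,
# and "cat.png" is a hypothetical input file):
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")  # returns a PIL mask image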
| 1 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
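# Illustrative usage (the tokenizer checkpoint below is an assumption, not part of this file):
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("xlm-roberta-large")
#   batch = tok(["a photo of a cat"], return_tensors="pt")
#   projected, raw = MultilingualCLIP(MCLIPConfig())(batch["input_ids"], batch["attention_mask"])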
| 182 |
'''Malus's law: intensity of polarized light transmitted through a polarizer.'''
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity I = I0 * cos^2(angle in degrees).

    >>> malus_law(100.0, 0)
    100.0
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
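# Example: malus_law(100.0, 60) is about 25.0, since cos(60 degrees) squared is 0.25.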
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 321 | 0 |
'''Counting coin combinations (Project Euler problem 31): ways to make a total in pence.'''


def one_pence():
    return 1


def two_pence(x):
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x):
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x):
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x):
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x):
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x):
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x):
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x=200):
    return two_pound(x)
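# For the classic Project Euler #31 input, solution(200) == 73682
# (the number of ways to make 2 pounds from standard British coins).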
if __name__ == "__main__":
print(solution(int(input().strip())))
| 370 |
'''Count the number of islands in a boolean matrix using 8-directional DFS.'''


class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it is inside the matrix, unvisited, and set.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
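# Example: with 8-directional connectivity,
#   Graph(3, 3, [[1, 0, 0], [0, 0, 0], [0, 0, 1]]).count_islands() == 2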
| 331 | 0 |
'''DonutSwin model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
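# Illustrative: with the defaults above, hidden_size = int(96 * 2 ** (4 - 1)) = 768.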
| 267 |
'''Tests for the object-detection pipeline.'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        # Stand-in so references to `Image.open` below do not fail when vision
        # dependencies are unavailable.
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
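    # `nested_simplify` recursively rounds floats (here to 4 decimals) so that small
    # numerical differences across devices do not break these exact-match assertions.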
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 267 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
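# Note: the GPT-2 backbone sees seq_length * 3 positions because every timestep
# contributes a (return, state, action) token triple to the input sequence.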
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 45 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : List[Any] = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _UpperCAmelCase["modeling_trajectory_transformer"] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _UpperCAmelCase, module_spec=__spec__)
| 45 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
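# `list_field` above: using `default_factory` gives each dataclass instance its own copy
# of the list, avoiding the mutable-default error a plain `field(default=[...])` raises.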
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features):
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 so the CTC loss ignores padded label positions
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model, inputs):
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
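# Note on `CTCTrainer.training_step` above: dividing the loss by
# `gradient_accumulation_steps` keeps gradient magnitudes comparable to single-step
# training when several micro-batches are accumulated before an optimizer update.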
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 287 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
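# Illustrative usage with HfArgumentParser (as in the accompanying scripts):
#   parser = HfArgumentParser(EvaluationArguments)
#   args = parser.parse_args_into_dataclasses()[0]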
@dataclass
class lowercase :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=lowercase_ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=lowercase_ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
__SCREAMING_SNAKE_CASE : Optional[bool] = field(
default=lowercase_ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default='''eval_results.json''' , metadata={'''help''': '''Name of the file to save the evaluation results to.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class lowercase :
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=lowercase_ , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=100_000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=1_000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
__SCREAMING_SNAKE_CASE : Optional[bool] = field(
default=lowercase_ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class lowercase :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=200_000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
    __SCREAMING_SNAKE_CASE : Optional[int] = field(
        default=32_768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
__SCREAMING_SNAKE_CASE : Optional[bool] = field(default=lowercase_ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class lowercase :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=lowercase_ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class lowercase :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
__SCREAMING_SNAKE_CASE : Optional[bool] = field(default=lowercase_ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
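
# Hedged usage sketch (added for illustration; `DemoArguments` and its two fields
# are hypothetical stand-ins, since the field names above are anonymized). The
# dataclasses above are typically consumed with HfArgumentParser, which turns
# each `field(..., metadata={'help': ...})` into a CLI flag:
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    seed: Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
    max_train_steps: Optional[int] = field(default=50_000 , metadata={'''help''': '''Maximum number of training steps.'''} )


if __name__ == "__main__":
    # parse from an explicit argv list so the sketch is self-contained
    demo_args = HfArgumentParser(DemoArguments ).parse_args_into_dataclasses(args=['--seed', '7'] )[0]
    print(demo_args.seed , demo_args.max_train_steps )  # 7 50000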
| 350 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig ):
    model_type = '''roformer'''

    def __init__( self , vocab_size=5_0000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
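
# Hedged usage sketch (added; not part of the original module): with the default
# task, the ONNX config above declares batch and sequence as dynamic axes.
if __name__ == "__main__":
    onnx_config = RoFormerOnnxConfig(RoFormerConfig() , task="default" )
    print(onnx_config.inputs )
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])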
| 200 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    '''simple docstring'''

    def __init__( self , parent ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_saved_model_creation_extended( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config , '''use_cache''' ):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , '''key_length''' , encoder_seq_length )

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , '''saved_model''' , '''1''' )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )

                if self.is_encoder_decoder:
                    output_hidden_states = outputs['''encoder_hidden_states''']
                    output_attentions = outputs['''encoder_attentions''']
                else:
                    output_hidden_states = outputs['''hidden_states''']
                    output_attentions = outputs['''attentions''']

                self.assertEqual(len(outputs ) , num_out )

                expected_num_layers = getattr(
                    self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )

                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )

                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained( self ):
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
        self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , '''key_length''' , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , '''key_length''' , encoder_seq_length )

        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )

            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )

            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )

        expected_slice = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 45 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
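
# Illustrative note (added; the caller below is hypothetical): with the
# _LazyModule registered above, importing this package is cheap, and the heavy
# torch/TF submodules are only imported on first attribute access:
#
#   from transformers.models import vit_mae
#   config = vit_mae.ViTMAEConfig()       # imports configuration_vit_mae now
#   model = vit_mae.ViTMAEModel(config)   # imports the torch-backed module now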
| 62 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ =logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ['pixel_values']

    def __init__( self , do_resize: bool = True , size_divisor: int = 3_2 , resample=PILImageResampling.BILINEAR , do_rescale: bool = True , **kwargs , ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize( self , image , size_divisor: int , resample , data_format: Optional[ChannelDimension] = None , **kwargs ) -> np.ndarray:
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image

    def rescale( self , image , scale , data_format: Optional[ChannelDimension] = None , **kwargs ) -> np.ndarray:
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize: Optional[bool] = None , size_divisor: Optional[int] = None , resample=None , do_rescale: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing" )

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError("Invalid image(s)" )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]

        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]

        if do_rescale:
            images = [self.rescale(image , scale=1 / 2_5_5 ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
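
# Hedged usage sketch (added; not part of the original module). Heights and
# widths are rounded *down* to multiples of size_divisor, so a 97x65 RGB image
# should come out as a (1, 3, 96, 64) channels-first batch:
if __name__ == "__main__":
    processor = GLPNImageProcessor(do_resize=True , size_divisor=3_2 , do_rescale=True )
    image = PIL.Image.fromarray(np.random.randint(0 , 2_5_5 , (9_7, 6_5, 3) , dtype=np.uint8 ) )
    batch = processor(images=image , return_tensors="np" )
    print(batch["pixel_values"].shape )  # (1, 3, 96, 64)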
| 363 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1 ):
        for perpendicular in range(base, max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000 ) -> int:
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
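
# Added sanity check (illustrative): perimeter 120 is the classic small case
# with exactly three solutions: (20, 48, 52), (24, 45, 51) and (30, 40, 50).
if __name__ == "__main__":
    assert pythagorean_triple(120)[120] == 3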
| 325 | 0 |
from __future__ import annotations
__author__ = """Muhammad Umer Farooq"""
__license__ = """MIT"""
__version__ = """1.0.0"""
__maintainer__ = """Muhammad Umer Farooq"""
__email__ = """[email protected]"""
__status__ = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
    def __init__( self , domain: str ):
        '''simple docstring'''
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag( self , tag: str , attrs: list[tuple[str, str | None]] ) -> None:
        '''simple docstring'''
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str ) -> str:
    return ".".join(get_sub_domain_name(url ).split("." )[-2:] )


def get_sub_domain_name(url: str ) -> str:
    return parse.urlparse(url ).netloc
def emails_from_url(url: str = "https://github.com" ) -> list[str]:
    domain = get_domain_name(url )

    # Initialize the parser
    parser = Parser(domain )

    try:
        # Open URL
        r = requests.get(url )

        # pass the raw HTML to the parser to get links
        parser.feed(r.text )

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url("""https://github.com""")
print(F'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails)))
| 20 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC ):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )


class MaxLengthCriteria(StoppingCriteria ):
    def __init__( self , max_length: int , max_position_embeddings: Optional[int] = None ):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all." )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria ):
    def __init__( self , start_length: int , max_new_tokens: int ):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria ):
    def __init__( self , max_time: float , initial_timestamp: Optional[float] = None ):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list ):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return any(criteria(input_ids , scores ) for criteria in self )

    @property
    def max_length( self ) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList , max_length: int ) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
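
# Hedged usage sketch (added; not part of the original module): combining
# criteria in a list and asking it whether generation should stop.
if __name__ == "__main__":
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8 ), MaxTimeCriteria(max_time=5.0 )] )
    fake_input_ids = torch.ones((1, 8) , dtype=torch.long )
    fake_scores = torch.zeros((1, 100) )
    print(criteria(fake_input_ids , fake_scores ) )  # True: length 8 has reached max_length
    print(criteria.max_length )                      # 8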
| 87 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 384
UpperCAmelCase__ : Any = 7
if "tiny" in model_name:
UpperCAmelCase__ : List[str] = 96
UpperCAmelCase__ : List[Any] = (2, 2, 6, 2)
UpperCAmelCase__ : List[str] = (3, 6, 12, 24)
elif "small" in model_name:
UpperCAmelCase__ : List[Any] = 96
UpperCAmelCase__ : str = (2, 2, 18, 2)
UpperCAmelCase__ : int = (3, 6, 12, 24)
elif "base" in model_name:
UpperCAmelCase__ : List[str] = 128
UpperCAmelCase__ : Any = (2, 2, 18, 2)
UpperCAmelCase__ : str = (4, 8, 16, 32)
UpperCAmelCase__ : Optional[int] = 12
UpperCAmelCase__ : Dict = 512
elif "large" in model_name:
UpperCAmelCase__ : int = 192
UpperCAmelCase__ : str = (2, 2, 18, 2)
UpperCAmelCase__ : Union[str, Any] = (6, 12, 24, 48)
UpperCAmelCase__ : Dict = 12
UpperCAmelCase__ : List[Any] = 768
# set label information
UpperCAmelCase__ : Optional[Any] = 150
UpperCAmelCase__ : Tuple = '''huggingface/label-files'''
UpperCAmelCase__ : Dict = '''ade20k-id2label.json'''
UpperCAmelCase__ : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase__ : Optional[int] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : Tuple = SwinConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , num_heads=UpperCamelCase__ , window_size=UpperCamelCase__ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
UpperCAmelCase__ : Dict = UperNetConfig(
        backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , id2label=UpperCamelCase__ , label2id=UpperCamelCase__ , )
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : Any )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = dct.pop(UpperCamelCase__ )
UpperCAmelCase__ : List[Any] = val
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Optional[int] )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase__ : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
UpperCAmelCase__ : Optional[Any] = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Any = in_proj_weight[:dim, :]
UpperCAmelCase__ : Optional[Any] = in_proj_bias[: dim]
UpperCAmelCase__ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase__ : str = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase__ : List[Any] = in_proj_weight[
-dim :, :
]
UpperCAmelCase__ : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : str = x.shape
UpperCAmelCase__ : Union[str, Any] = x.reshape(UpperCamelCase__ , 4 , in_channel // 4 )
UpperCAmelCase__ : List[str] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(UpperCamelCase__ , UpperCamelCase__ )
return x
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = x.shape
UpperCAmelCase__ : Any = x.reshape(UpperCamelCase__ , in_channel // 4 , 4 )
UpperCAmelCase__ : Tuple = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(UpperCamelCase__ , UpperCamelCase__ )
return x
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = x.shape[0]
UpperCAmelCase__ : Tuple = x.reshape(4 , in_channel // 4 )
UpperCAmelCase__ : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(UpperCamelCase__ )
return x
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> int:
'''simple docstring'''
UpperCAmelCase__ : str = x.shape[0]
UpperCAmelCase__ : List[Any] = x.reshape(in_channel // 4 , 4 )
UpperCAmelCase__ : Optional[Any] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(UpperCamelCase__ )
return x
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Union[str, Any] )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
UpperCAmelCase__ : int = model_name_to_url[model_name]
UpperCAmelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="cpu" , file_name=UpperCamelCase__ )[
'''state_dict'''
]
for name, param in state_dict.items():
print(UpperCamelCase__ , param.shape )
UpperCAmelCase__ : Optional[int] = get_upernet_config(UpperCamelCase__ )
UpperCAmelCase__ : List[str] = UperNetForSemanticSegmentation(UpperCamelCase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase__ )
if "bn" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("bn" , "batch_norm" )
UpperCAmelCase__ : List[Any] = val
# rename keys
UpperCAmelCase__ : Optional[int] = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
UpperCAmelCase__ : Optional[Any] = reverse_correct_unfold_reduction_order(UpperCamelCase__ )
if "norm" in key:
UpperCAmelCase__ : Tuple = reverse_correct_unfold_norm_order(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify on image
UpperCAmelCase__ : int = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
UpperCAmelCase__ : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("RGB" )
UpperCAmelCase__ : Optional[int] = SegformerImageProcessor()
UpperCAmelCase__ : List[Any] = processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(UpperCamelCase__ )
UpperCAmelCase__ : List[Any] = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
UpperCAmelCase__ : str = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCAmelCase : int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
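
# Example invocation (added; the script filename and output path are hypothetical):
#   python convert_upernet_swin.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny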
| 352 |
"""simple docstring"""
import functools
def min_distance_up_bottom(worda: str , wordb: str ) -> int:
    '''simple docstring'''
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance(indexa: int , indexb: int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )

    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
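
    # Added example: three edits turn "kitten" into "sitting"
    # (substitute k->s, substitute e->i, append g).
    assert min_distance_up_bottom("kitten", "sitting") == 3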
| 298 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer ):
    """simple docstring"""

    def __init__( self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )

    @classmethod
    def from_tokenizer( cls , tokenizer: GPT2Tokenizer , *args , **kwargs ):
        merges = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , *init_inputs , **kwargs ):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config( cls , config ):
        return cls(**config )

    def get_config( self ):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call( self , x , max_length: int = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
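
# Hedged usage sketch (added; assumes the standard "gpt2" checkpoint is
# reachable and that this module is importable, since it uses a relative
# import above):
if __name__ == "__main__":
    tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2" )
    batch = tf_tokenizer(tf.constant(["hello world", "graph-mode tokenization"] ) )
    print(batch["input_ids"] )  # ragged int token ids; no padding since pad_token_id is None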
| 334 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' ,return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained('gpt2' )

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' ,return_value=response_mock ) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained('gpt2' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file( self ):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file ,'wb' ) as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,f )

            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json' ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json' ,'wb' ) as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,f )
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size ,1000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json' )

    def test_legacy_load_from_url( self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
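The tests above exercise Trie.add, Trie.split, and Trie.cut_text. As a quick illustration outside the test harness, here is a minimal usage sketch; it assumes the Trie class is importable from transformers.tokenization_utils, which holds for recent transformers releases.

from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
# split() is lossless: added tokens become their own chunks and everything
# in between is returned verbatim, so the chunks re-concatenate to the input.
print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']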
| 334 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE model for encoding images into discrete latents and decoding them back."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
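A minimal round-trip sketch for the model above, assuming the diffusers package is installed and exports VQModel. With the default single down block the encoder does not downsample, so the latents keep the input's spatial size.

import torch
from diffusers import VQModel

model = VQModel()  # defaults: 3 latent channels, one down/up block
sample = torch.randn(1, 3, 32, 32)
latents = model.encode(sample).latents          # pre-quantization latents
reconstruction = model.decode(latents).sample   # quantize, then decode
print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])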
| 369 |
def heaps(arr: list) -> list:
    """
    Pure Python implementation of Heap's algorithm (recursive version),
    returning all permutations of a list as tuples.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
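A quick sanity check (not part of the original module): Heap's algorithm emits all n! orderings exactly once for distinct elements, e.g. six tuples for a three-element input.

perms = heaps([1, 2, 3])
assert len(perms) == 6
assert set(perms) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}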
| 4 | 0 |
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer value found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Return the boolean value of `key` from the environment, else `default`."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """Return the raw string value of `key` from the environment, else `default`."""
    value = os.environ.get(key, str(default))
    return value
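A short usage sketch for the helpers above; the environment variable names here are illustrative, not mandated by the module.

import os

os.environ["WORLD_SIZE"] = "4"

# The first matching variable wins; absent or negative values fall through to the default.
world_size = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], default=1)  # -> 4
debug = parse_flag_from_env("MY_APP_DEBUG", default=False)                # -> False
mode = parse_choice_from_env("MY_APP_MODE", default="no")                 # -> "no"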
| 37 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase_ ( __UpperCAmelCase ) -> tuple:
return (data["data"], data["target"])
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
lowerCAmelCase__ : List[Any] = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__UpperCAmelCase , __UpperCAmelCase )
# Predict target for test data
lowerCAmelCase__ : Dict = xgb.predict(__UpperCAmelCase )
lowerCAmelCase__ : Any = predictions.reshape(len(__UpperCAmelCase ) , 1 )
return predictions
def lowercase_ ( ) -> None:
lowerCAmelCase__ : Optional[Any] = fetch_california_housing()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = data_handling(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = train_test_split(
__UpperCAmelCase , __UpperCAmelCase , test_size=0.25 , random_state=1 )
lowerCAmelCase__ : Optional[Any] = xgboost(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(__UpperCAmelCase , __UpperCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(__UpperCAmelCase , __UpperCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
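One small extension worth noting (not in the original script): RMSE is often reported alongside MAE because it is in the same units as the target; it is just the square root of the MSE computed above.

import numpy as np
from sklearn.metrics import mean_squared_error


def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    # Root of the mean squared error; same units as the target variable.
    return float(np.sqrt(mean_squared_error(y_true, y_pred)))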
| 242 | 0 |
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by `check_over_configs` above.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the
        # same results for the defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
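Outside the test suite, the denoising loop these tests drive looks like the following sketch (assuming diffusers is installed; the zero tensor stands in for a real UNet's noise prediction).

import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler()
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for a UNet epsilon prediction
    sample = scheduler.step(model_output, t, sample).prev_sample

print(sample.shape)  # torch.Size([1, 3, 8, 8])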
| 137 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training, evaluation, and inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments controlling the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""

    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_top_rows = int(eval_result * len(dataset))
        print(num_top_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_top_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""

    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files.get("eval"),  # may be absent when evaluation is disabled
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
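A hypothetical launcher for the entry point above; the module name, file paths, and hyperparameter values here are placeholders, not defaults from the script.

from selftraining import selftrain

selftrain(
    model_name_or_path="bert-base-uncased",
    train_file="data/train.csv",
    infer_file="data/unlabeled.csv",
    output_dir="outputs/self-training",
    eval_file="data/eval.csv",
    evaluation_strategy="epoch",
    max_selftrain_iterations=3,
    seed=42,
)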
| 137 | 1 |